repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
DoWhatILove/turtle | ai/natural language understanding/conceptnet/distinguish_attributes.py | 1 | 4801 | '''
the paper: Luminoso at SemEval-2018 Task 10: Distinguishing Attributes Using Text Corpora and Relational Knowledge.
this follows the blog post: https://blog.conceptnet.io/posts/2018/distinguishing-attributes-using-conceptnet/
'''
from sklearn.metrics import f1_score
import numpy as np
import pandas as pd
def text_to_uri(text):
return '/c/en/' + text.lower().replace('-', '_')
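# Illustrative example: text_to_uri('Ice-Cream') returns '/c/en/ice_cream'.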
def normalize_vec(vec):
norm = vec.dot(vec)**0.5
if norm == 0:
return vec
return vec/norm
class AttributeHeuristic(object):
def __init__(self, hdf5_filename):
'''
load a word embedding matrix that is the 'mat' member of an HDF5 file,
with UTF-8 labels for its rows.
(this is the format that conceptnet numberbatch word embeddings use.)
'''
self.embeddings = pd.read_hdf(hdf5_filename, 'mat', encoding='utf-8')
self.cache = {}
def get_vector(self, term):
'''
look up the vector for a term, returning it normalized to a unit vector.
if the term is out-of-vocabulary, return a zero vector.
Because many terms appear repeatedly in the data, cache the results.
'''
uri = text_to_uri(term)
if uri in self.cache:
return self.cache[uri]
else:
try:
vec = normalize_vec(self.embeddings.loc[uri])
except KeyError:
vec = pd.Series(index=self.embeddings.columns).fillna(0)
self.cache[uri] = vec
return vec
def get_similarity(self, term1, term2):
'''
get the cosine similarity between the embeddings of two terms
'''
return self.get_vector(term1).dot(self.get_vector(term2))
def compare_attributes(self, term1, term2, attribute):
'''
our heuristic for whether an attribute applies more to term1 than to term2:
find the cosine similarity of each term with the attribute, and take the difference
of the square roots of those similarities
'''
match1 = max(0, self.get_similarity(term1, attribute)) ** 0.5
match2 = max(0, self.get_similarity(term2, attribute)) ** 0.5
return match1 - match2
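# Written as a formula, the score is sqrt(max(0, cos(term1, attribute))) -
# sqrt(max(0, cos(term2, attribute))); a positive score means the attribute
# fits term1 better than term2.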
def classify(self, term1, term2, attribute, threshold):
'''
convert the attribute heuristic into a yes-or-no decision, by testing
whether the difference is larger than a given threshold.
'''
return self.compare_attributes(term1, term2, attribute) > threshold
def evaluate(self, semeval_filename, threshold):
'''
evaluate the heuristic on a file containing instances of this form:
banjo, harmonica, stations, 0
mushroom, onions, stem, 1
return the macro-averaged F1 score.
'''
our_answers = []
real_answers = []
for line in open(semeval_filename, encoding='utf-8'):
term1, term2, attribute, strval = line.rstrip().split(',')
discriminative = bool(int(strval))
real_answers.append(discriminative)
our_answers.append(self.classify(
term1, term2, attribute, threshold))
return f1_score(real_answers, our_answers, average='macro')
def show_some_examples(heuristic, term1, term2, attribute, because = ''):
difference = heuristic.compare_attributes(term1, term2, attribute)
print(because)
print(
f'term1: {term1}, term2: {term2}, attribute: {attribute}... their difference score is {difference}')
def main():
heuristic = AttributeHeuristic(
r'E:\data\conceptnet\numberbatch-20180108-biased.h5')
f1_train = heuristic.evaluate(
r'E:\data\conceptnet\discriminatt-train.txt', threshold=0.1)
print(f'f1 score over training data {f1_train}')
f1_validation = heuristic.evaluate(
r'E:\data\conceptnet\discriminatt-validation.txt', threshold=0.1)
print(f'f1 score over validation data {f1_validation}')
f1_test = heuristic.evaluate(
r'E:\data\conceptnet\discriminatt-test.txt', threshold=0.1)
print(f'f1 score over test data {f1_test}')
show_some_examples(heuristic, 'window', 'door', 'glass', 'most windows are made of glass, most doors are not ...')
show_some_examples(heuristic, 'mushroom', 'onions', 'stem', 'mushrooms have stems, while onions do not ...')
show_some_examples(heuristic, 'cappuccino', 'americano', 'milk', 'cappuccino contains milk, while americano does not ...')
show_some_examples(heuristic, 'train', 'subway', 'rails', 'trains and subways both involve rails ...')
show_some_examples(heuristic, 'finger', 'soup', 'water', 'water distinguishes soup from finger, not finger from soup, hence the negative score ...')
if __name__ == '__main__':
main()
| mit |
andaag/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
nicjhan/MOM6-examples | tools/analysis/MOM6_annual_analysis.py | 6 | 4961 | # Script to plot sub-surface ocean temperature drift.
# Analysis: using newer python 2.7.3
"""
module purge
module use -a /home/fms/local/modulefiles
module load gcc
module load netcdf/4.2
module load python/2.7.3
"""
import os
import math
import numpy as np
from numpy import ma
from netCDF4 import Dataset, MFDataset, num2date, date2num
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# -----------------------------------------------------------------------------
# Function to convert from page coordinates to non-dimensional coordinates
def page_to_ndc( panel, page ):
if len(panel) == 4:
ndc = [ 0.0, 0.0, 0.0, 0.0 ]
ndc[0] = (panel[0]-page[0])/(page[2]-page[0])
ndc[1] = (panel[1]-page[1])/(page[3]-page[1])
ndc[2] = (panel[2]-panel[0])/(page[2]-page[0])
ndc[3] = (panel[3]-panel[1])/(page[3]-page[1])
return ndc
elif len(panel) == 2:
ndc = [ 0.0, 0.0 ]
ndc[0] = (panel[0]-page[0])/(page[2]-page[0])
ndc[1] = (panel[1]-page[1])/(page[3]-page[1])
return ndc
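# Illustrative check: with page = [0., 0., 612., 792.] and a panel of
# [89., 99., 480., 670.], page_to_ndc returns roughly [0.145, 0.125, 0.639, 0.721],
# i.e. the left, bottom, width and height fractions of the page.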
# -----------------------------------------------------------------------------
# Function to discretize colormap with option to white out certain regions
def cmap_discretize(cmap, N, white=None):
"""Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, eg. cm.jet.
N: number of colors.
Example
x = resize(arange(100), (5,100))
djet = cmap_discretize(cm.jet, 5)
imshow(x, cmap=djet)
"""
if type(cmap) == str:
cmap = get_cmap(cmap)
colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))
colors_rgba = cmap(colors_i)
# White levels?
if white != None:
for i in range(N):
if white[i] > 0.0:
colors_rgba[i,:] = 1.0
# Construct colormap dictionary
indices = np.linspace(0, 1., N+1)
cdict = {}
for ki,key in enumerate(('red','green','blue')):
cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(N+1) ]
# Return colormap object.
return matplotlib.colors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024)
# -----------------------------------------------------------------------------
# Radius of the earth (shared/constants/constants.F90)
radius = 6371.0e3
# Ocean heat capacity (ocean_core/ocean_parameters.F90)
cp_ocean = 3992.10322329649
# Read 'descriptor' and 'years' from external file
f = open("files.txt")
for line in f.readlines():
exec(line.lstrip())
f.close()
model_label = "%s (%s)" % (descriptor,years)
# TMPDIR where input files are located
tmpdir = "./"
# Open input files
#fstatic = Dataset(tmpdir+'19000101.ocean_geometry.nc', 'r')
fstatic = MFDataset(tmpdir+'*.ocean_static.nc')
ftemp = MFDataset(tmpdir+'*.ocean_annual.nc')
# Time info
time = ftemp.variables["time"]
ntimes = len(time[:])
date = num2date(time,time.units,time.calendar.lower())
year = [d.year for d in date]
time_days = date2num(date,'days since 01-01-0001',time.calendar.lower())
# Grid info
#area = fstatic.variables["Ah"][:]
area = fstatic.variables["area_t"][:]
z = ftemp.variables["zl"][:]
nz = len(z)
# Input variables
temp = ftemp.variables["temp"]
salt = ftemp.variables["salt"]
# Create arrays to hold derived variables
ztemp = ma.array( np.zeros((ntimes,nz), 'float64'), mask=True )
zsalt = ma.array( np.zeros((ntimes,nz), 'float64'), mask=True )
# Loop over time
#for itime in range(ntimes):
for itime in range(1):
# Compute vertical profiles of temperature and salinity
tmp = temp[itime,:,:,:]
contmp = salt[itime,:,:,:]
for iz in range(nz):
ztemp[itime,iz] = ma.average(tmp[iz,:,:], weights=area)
zsalt[itime,iz] = ma.average(contmp[iz,:,:], weights=area)
# Transpose for compatibility with contour plots
ztemp = ztemp.transpose()
zsalt = zsalt.transpose()
# Close files
fstatic.close()
ftemp.close()
# -----------------------------------------------------------------------------
# Create plot
# Specify plots position in points: [left bottom right top]
page = [ 0.0, 0.0, 612.0, 792.0 ] # corresponding to papertype='letter'
plot1a = [ 89.0, 497.0, 480.0, 670.0 ]
plot1b = [ 89.0, 324.0, 480.0, 497.0 ]
cbar = [ 506.0, 324.0, 531.0, 670.0 ]
plot2 = [ 89.0, 99.0, 480.0, 272.0 ]
plot = [ 89.0, 99.0, 480.0, 670.0 ]
#plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.dpi'] = 72.0
plt.rcParams['figure.figsize'] = [ (page[2]-page[0])/72.0, (page[3]-page[1])/72.0 ]
fig = plt.figure()
ax1a = plt.axes(page_to_ndc(plot,page))
ax1a.set_ylim(5300,0)
ax1a.set_ylabel('Depth (m)')
ax1a.set_xlabel('Ocean Temp (C)',color='r')
ax1a.plot(ztemp,z,ls='-',color='r')
ax1b = ax1a.twiny()
ax1b.set_xlabel('Ocean Salinity (PSU)',color='b')
ax1b.plot(zsalt,z,ls='-',color='b')
# Figure title
xy = page_to_ndc([280.0,730.0],page)
fig.text(xy[0],xy[1],model_label,ha="center",size="x-large")
# Save figure
fig.savefig("ocean_temp_salt.ps")
| gpl-3.0 |
magne-max/zipline-ja | zipline/utils/calendars/us_holidays.py | 6 | 4015 | from pandas import (
Timestamp,
DateOffset,
date_range,
)
from pandas.tseries.holiday import (
Holiday,
sunday_to_monday,
nearest_workday,
)
from dateutil.relativedelta import (
MO,
TH
)
from pandas.tseries.offsets import Day
from zipline.utils.calendars.trading_calendar import (
MONDAY,
TUESDAY,
WEDNESDAY,
THURSDAY,
FRIDAY,
)
# These have the same definition, but are used in different places because the
# NYSE closed at 2:00 PM on Christmas Eve until 1993.
from zipline.utils.pandas_utils import july_5th_holiday_observance
ChristmasEveBefore1993 = Holiday(
'Christmas Eve',
month=12,
day=24,
end_date=Timestamp('1993-01-01'),
# When Christmas is a Saturday, the 24th is a full holiday.
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY),
)
ChristmasEveInOrAfter1993 = Holiday(
'Christmas Eve',
month=12,
day=24,
start_date=Timestamp('1993-01-01'),
# When Christmas is a Saturday, the 24th is a full holiday.
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY),
)
USNewYearsDay = Holiday(
'New Years Day',
month=1,
day=1,
# When Jan 1 is a Sunday, US markets observe the subsequent Monday.
# When Jan 1 is a Saturday (as in 2005 and 2011), no holiday is observed.
observance=sunday_to_monday
)
USMartinLutherKingJrAfter1998 = Holiday(
'Dr. Martin Luther King Jr. Day',
month=1,
day=1,
# The US markets didn't observe MLK day as a holiday until 1998.
start_date=Timestamp('1998-01-01'),
offset=DateOffset(weekday=MO(3)),
)
USMemorialDay = Holiday(
# NOTE: The definition for Memorial Day is incorrect as of pandas 0.16.0.
# See https://github.com/pydata/pandas/issues/9760.
'Memorial Day',
month=5,
day=25,
offset=DateOffset(weekday=MO(1)),
)
USIndependenceDay = Holiday(
'July 4th',
month=7,
day=4,
observance=nearest_workday,
)
Christmas = Holiday(
'Christmas',
month=12,
day=25,
observance=nearest_workday,
)
MonTuesThursBeforeIndependenceDay = Holiday(
# When July 4th is a Tuesday, Wednesday, or Friday, the previous day is a
# half day.
'Mondays, Tuesdays, and Thursdays Before Independence Day',
month=7,
day=3,
days_of_week=(MONDAY, TUESDAY, THURSDAY),
start_date=Timestamp("1995-01-01"),
)
FridayAfterIndependenceDayExcept2013 = Holiday(
# When July 4th is a Thursday, the next day is a half day (except in 2013,
# when, for no explicable reason, Wednesday was a half day instead).
"Fridays after Independence Day that aren't in 2013",
month=7,
day=5,
days_of_week=(FRIDAY,),
observance=july_5th_holiday_observance,
start_date=Timestamp("1995-01-01"),
)
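# Black Friday is the day after Thanksgiving: the offset below starts from
# November 1st, rolls forward to the fourth Thursday (TH(4)), then adds one day.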
USBlackFridayBefore1993 = Holiday(
'Black Friday',
month=11,
day=1,
# Black Friday was not observed until 1992.
start_date=Timestamp('1992-01-01'),
end_date=Timestamp('1993-01-01'),
offset=[DateOffset(weekday=TH(4)), Day(1)],
)
USBlackFridayInOrAfter1993 = Holiday(
'Black Friday',
month=11,
day=1,
start_date=Timestamp('1993-01-01'),
offset=[DateOffset(weekday=TH(4)), Day(1)],
)
BattleOfGettysburg = Holiday(
# All of the floor traders in Chicago were sent to PA
'Markets were closed during the battle of Gettysburg',
month=7,
day=(1, 2, 3),
start_date=Timestamp("1863-07-01"),
end_date=Timestamp("1863-07-03")
)
# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks
September11Closings = date_range('2001-09-11', '2001-09-16', tz='UTC')
# http://en.wikipedia.org/wiki/Hurricane_sandy
HurricaneSandyClosings = date_range(
'2012-10-29',
'2012-10-30',
tz='UTC'
)
# National Days of Mourning
# - President Richard Nixon - April 27, 1994
# - President Ronald W. Reagan - June 11, 2004
# - President Gerald R. Ford - Jan 2, 2007
USNationalDaysofMourning = [
Timestamp('1994-04-27', tz='UTC'),
Timestamp('2004-06-11', tz='UTC'),
Timestamp('2007-01-02', tz='UTC'),
]
| apache-2.0 |
yeasy/lazyctrl | others/lc_sim/src/topo.py | 1 | 5742 | #!/usr/bin/python
'''The Topology module for the HC project.
@version: 1.0
@author: U{Baohua Yang<mailto:yangbaohua@gmail.com>}
@created: Oct 12, 2011
@last update: Oct 22, 2011
@see: U{<https://github.com/yeasy/lazyctrl>}
@TODO: nothing
'''
import networkx as nx
import os.path, sys
from matplotlib import pyplot as plt
class Topo:
""" Topology class.
"""
def __init__(self, name, num_port=16):
""" Generate a topo.
@param name: Topology name
@param num_port: Number of ports of each switch
"""
self.name = name
self.G = nx.Graph()
self.num_port = num_port
self.coreSWList, self.aggSWList, self.edgeSWList = [], [], []
def importFrom(self,fn):
"""
Import the topo from outside file.
@param fn: Name of the outside file
"""
if not fn:
return
f = open(fn,'r')
try:
for l in f.readlines():
src,dst,w = l.split()
src,dst,w = int(src),int(dst),float(w)
if src not in self.edgeSWList:
self.edgeSWList.append(src)
if dst not in self.edgeSWList:
self.edgeSWList.append(dst)
self.G.add_edge(src,dst,weight=w) #add new edge
except:
print "[Topo.importFrom()]Error in open file",fn
finally:
f.close()
def info(self):
""" Print information of the topology.
"""
print "Name=%s" % self.name
print "#Nodes=%u" % len(self.G.nodes())
print "#Edges=%u" % len(self.G.edges())
def exportPng(self, fn='test.png'):
""" Save the topo into a png file.
@param fn: The name of exported file
"""
G = self.G
if not fn:
fn = self.name
if G.is_directed():
test_graph = G.to_undirected()
else:
test_graph = G
fn_out_png = os.path.splitext(fn)[0] + ".png"
plt.figure(figsize=(8, 8))
#pos = nx.graphviz_layout(test_graph, prog='neato')
#pos = nx.spring_layout(test_graph)
pos = nx.shell_layout(test_graph)
nx.draw(test_graph, pos, alpha=1.0, node_size=160, node_color='#eeeeee')
#xmax = 1.1 * max(xx for xx, yy in pos.values())
#ymax = 1.1 * max(yy for xx, yy in pos.values())
#plt.xlim(-1*xmax, xmax)
#plt.ylim(-1*xmax, ymax)
plt.savefig(fn_out_png)
def exportSW(self, k, switchfile='switch.txt'):
""" Save the switch information into switch.txt.
@param k: number of ports of each edge switch
@param switchfile: out file name
"""
host_id = 0 #start with 0
with open(switchfile, 'w') as f:
try:
for sw in self.aggSWList:
for i in range(k):
f.write("%u %u\n" % (host_id, sw))
host_id += 1
except:
ex = sys.exc_info()[2].tb_frame.f_back
print "[file %s, line %s, module %s]: Error when exportSW to %s, " \
% (__file__, ex.f_lineno, __name__, switchfile)
f.close()
class Random(Topo):
""" Random Topology.
"""
def __init__(self, n, m, seed=None):
""" Produces a graph picked randomly
out of the set of all graphs
with n nodes and m edges.
@param n: Number of nodes
@param m: Number of edges
@param seed: Seed for random number generator (default=None).
@return: The Topology
"""
Topo.__init__(self, name='Random')
self.n = n
self.m = m
self.G = nx.gnm_random_graph(n, m)
class Flat(Topo):
""" Flat Topology with only edge switches.
"""
def __init__(self, num_sw=16, num_port=4):
""" Produces a flat topo with only edge switches.
@param num_sw: The number of switches
@param num_port: The number of ports of each switch
@return: The Topology
"""
Topo.__init__(self, name='Flat', num_port=num_port)
self.edgeSWList = range(num_sw)
self.G.add_nodes_from(self.edgeSWList, type='edge_switch') #edge sw
class FatTree(Topo):
""" FatTree Topology.
"""
def __init__(self, k):
""" Generate the topo.
@param k: using k-port switches supports (k^2)*5/4 switches and (k^3)/4 hosts
"""
Topo.__init__(self, name='FatTree', num_port=k)
self.coreSWList = [i for i in range(k ** 2 / 4)]
self.G.add_nodes_from(self.coreSWList, type='core_switch') #core sw
self.aggSWList = [i + len(self.coreSWList) for i in range(k ** 2 / 2)]
self.G.add_nodes_from(self.aggSWList, type='agg_switch') #agg sw
self.edgeSWList = [i + len(self.coreSWList) + len(self.aggSWList) for i in range(k ** 2 / 2)]
self.G.add_nodes_from(self.edgeSWList, type='edge_switch') #edge sw
for p in range(k): #for pod p
for i in range(k / 2): #for each agg switch
for j in range(k / 2): #for each port of the agg switch
self.G.add_edge(self.aggSWList[i + p * (k / 2)], self.coreSWList[j + i * (k / 2)])
for j in range(k / 2): #for each edge switch in the pod
self.G.add_edge(self.aggSWList[i + p * (k / 2)], self.edgeSWList[j + p * (k / 2)])
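# Illustrative sizing from the FatTree docstring: with k = 4 ports per switch,
# there are k**2/4 = 4 core, k**2/2 = 8 aggregation and k**2/2 = 8 edge switches
# (20 = (k**2)*5/4 in total), supporting (k**3)/4 = 16 hosts.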
if __name__ == "__main__":
#Random(10,20).info()
#Topo('test').info()
#fattree = FatTree(48)
#fattree.info()
#fattree.exportSW(48, 'testSW.txt')
#fattree.export('test.png')
flat = Flat()
flat.info()
topo = Topo('test')
topo.importFrom("./sw_pair_weight.txt")
topo.exportPng('sw_pair_weight.png')
| apache-2.0 |
BitTiger-MP/DS502-AI-Engineer | DS502-1702/Homework/week2_homework_solution.py | 1 | 2319 | # coding=utf-8
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
#
# Construct the training data
x = np.arange(0., 10., 0.2)
m = len(x) # number of training data points
x0 = np.full(m, 1.0)
input_data = np.vstack([x0, x]).T # treat the bias b as the first component of the weight vector
target_data = 2 * x + 5 + np.random.randn(m)
# Define the batch size
batch_size = 10
# Two termination conditions
loop_max = 10000 # maximum number of iterations (to avoid an infinite loop)
epsilon = 1e-3
# Initialize the weights
np.random.seed(0)
w = np.random.randn(2)
# w = np.zeros(2)
alpha = 0.001 # step size (too large causes oscillation, too small slows convergence)
diff = 0.
error = np.zeros(2)
count = 0 # loop counter
finish = 0 # termination flag
error_list = []
# -----------------------------------------------Batch gradient descent-----------------------------------------------------------
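# For each mini-batch B the update implemented below is
#   w <- w - alpha * sum_{i in B} (w . x_i - y_i) * x_i,
# i.e. a step along the negative gradient of the squared error summed over the batch.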
while count < loop_max:
count += 1
# Standard gradient descent accumulates the error over all samples before updating the weights, while stochastic gradient descent updates the weights from a single training sample
# In standard gradient descent each weight update sums over many samples, which requires more computation
for batch in xrange(0, m, batch_size):
x_batch = input_data[batch:batch + batch_size]
y_batch = target_data[batch:batch + batch_size]
sum_m = np.zeros(2)
sum_error = np.zeros(2)
for i in range(len(x_batch)):
error = np.dot(w, x_batch[i]) - y_batch[i]
sum_error += error
dif = error * x_batch[i]
sum_m += dif
w = w - alpha * sum_m # note the choice of step size alpha; too large a value causes oscillation
error_list.append(np.sum(sum_error)**2)
# Check whether the iteration has converged
if np.linalg.norm(w - error) < epsilon:
finish = 1
break
else:
error = w
print 'loop count = %d' % count, '\tw:[%f, %f]' % (w[0], w[1])
# ----------------------------------------------------------------------------------------------------------------------
# check with scipy linear regression
slope, intercept, r_value, p_value, slope_std_error = stats.linregress(x, target_data)
print 'intercept = %s slope = %s' % (intercept, slope)
plt.plot(range(len(error_list[0:100])), error_list[0:100])
plt.show()
plt.plot(x, target_data, 'k+')
plt.plot(x, w[1] * x + w[0], 'r')
plt.show()
| apache-2.0 |
chrsrds/scikit-learn | benchmarks/bench_text_vectorizers.py | 15 | 2047 | """
To run this benchmark, you will need,
* scikit-learn
* pandas
* memory_profiler
* psutil (optional, but recommended)
"""
import timeit
import itertools
import numpy as np
import pandas as pd
from memory_profiler import memory_usage
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import (CountVectorizer, TfidfVectorizer,
HashingVectorizer)
n_repeat = 3
def run_vectorizer(Vectorizer, X, **params):
def f():
vect = Vectorizer(**params)
vect.fit_transform(X)
return f
text = fetch_20newsgroups(subset='train').data[:1000]
print("="*80 + '\n#' + " Text vectorizers benchmark" + '\n' + '='*80 + '\n')
print("Using a subset of the 20 newsrgoups dataset ({} documents)."
.format(len(text)))
print("This benchmarks runs in ~1 min ...")
res = []
for Vectorizer, (analyzer, ngram_range) in itertools.product(
[CountVectorizer, TfidfVectorizer, HashingVectorizer],
[('word', (1, 1)),
('word', (1, 2)),
('char', (4, 4)),
('char_wb', (4, 4))
]):
bench = {'vectorizer': Vectorizer.__name__}
params = {'analyzer': analyzer, 'ngram_range': ngram_range}
bench.update(params)
dt = timeit.repeat(run_vectorizer(Vectorizer, text, **params),
number=1,
repeat=n_repeat)
bench['time'] = "{:.3f} (+-{:.3f})".format(np.mean(dt), np.std(dt))
mem_usage = memory_usage(run_vectorizer(Vectorizer, text, **params))
bench['memory'] = "{:.1f}".format(np.max(mem_usage))
res.append(bench)
df = pd.DataFrame(res).set_index(['analyzer', 'ngram_range', 'vectorizer'])
print('\n========== Run time performance (sec) ===========\n')
print('Computing the mean and the standard deviation '
'of the run time over {} runs...\n'.format(n_repeat))
print(df['time'].unstack(level=-1))
print('\n=============== Memory usage (MB) ===============\n')
print(df['memory'].unstack(level=-1))
| bsd-3-clause |
ricket1978/ggplot | ggplot/geoms/geom_line.py | 12 | 1405 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
from .geom import geom
class geom_line(geom):
DEFAULT_AES = {'color': 'black', 'alpha': None, 'linetype': 'solid', 'size': 1.0}
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}
_aes_renames = {'size': 'linewidth', 'linetype': 'linestyle'}
_units = {'alpha', 'color', 'linestyle'}
def __init__(self, *args, **kwargs):
super(geom_line, self).__init__(*args, **kwargs)
self._warning_printed = False
def _plot_unit(self, pinfo, ax):
if 'linewidth' in pinfo and isinstance(pinfo['linewidth'], list):
# ggplot also supports aes(size=...) but the current matplotlib
# does not. See https://github.com/matplotlib/matplotlib/issues/2658
pinfo['linewidth'] = 4
if not self._warning_printed:
msg = "'geom_line()' currenty does not support the mapping of " +\
"size ('aes(size=<var>'), using size=4 as a replacement.\n" +\
"Use 'geom_line(size=x)' to set the size for the whole line.\n"
sys.stderr.write(msg)
self._warning_printed = True
pinfo = self.sort_by_x(pinfo)
x = pinfo.pop('x')
y = pinfo.pop('y')
ax.plot(x, y, **pinfo)
| bsd-2-clause |
vikhyat/dask | dask/dataframe/tests/test_dataframe.py | 1 | 90686 | from itertools import product
from datetime import datetime
from operator import getitem
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
import dask
from dask.async import get_sync
from dask.utils import raises, ignoring
import dask.dataframe as dd
from dask.dataframe.core import (repartition_divisions, _loc,
_coerce_loc_index, aca, reduction, _concat, _Frame)
from dask.dataframe.utils import eq, assert_dask_graph
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
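# `d` is a three-partition dask DataFrame with divisions (0, 4, 9, 9) built from
# the graph above; `full` is the equivalent in-memory pandas DataFrame used as
# the reference result in the assertions below.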
def test_Dataframe():
result = (d['a'] + 1).compute()
expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
name='a')
assert eq(result, expected)
assert list(d.columns) == list(['a', 'b'])
full = d.compute()
assert eq(d[d['b'] > 2], full[full['b'] > 2])
assert eq(d[['a', 'b']], full[['a', 'b']])
assert eq(d.a, full.a)
assert d.b.mean().compute() == full.b.mean()
assert np.allclose(d.b.var().compute(), full.b.var())
assert np.allclose(d.b.std().compute(), full.b.std())
assert d.index._name == d.index._name # this is deterministic
assert repr(d)
def test_head_tail():
assert eq(d.head(2), full.head(2))
assert eq(d.head(3), full.head(3))
assert eq(d.head(2), dsk[('x', 0)].head(2))
assert eq(d['a'].head(2), full['a'].head(2))
assert eq(d['a'].head(3), full['a'].head(3))
assert eq(d['a'].head(2), dsk[('x', 0)]['a'].head(2))
assert sorted(d.head(2, compute=False).dask) == \
sorted(d.head(2, compute=False).dask)
assert sorted(d.head(2, compute=False).dask) != \
sorted(d.head(3, compute=False).dask)
assert eq(d.tail(2), full.tail(2))
assert eq(d.tail(3), full.tail(3))
assert eq(d.tail(2), dsk[('x', 2)].tail(2))
assert eq(d['a'].tail(2), full['a'].tail(2))
assert eq(d['a'].tail(3), full['a'].tail(3))
assert eq(d['a'].tail(2), dsk[('x', 2)]['a'].tail(2))
assert sorted(d.tail(2, compute=False).dask) == \
sorted(d.tail(2, compute=False).dask)
assert sorted(d.tail(2, compute=False).dask) != \
sorted(d.tail(3, compute=False).dask)
def test_Series():
assert isinstance(d.a, dd.Series)
assert isinstance(d.a + 1, dd.Series)
assert eq((d + 1), full + 1)
assert repr(d.a).startswith('dd.Series')
def test_Index():
for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D', periods=10))]:
ddf = dd.from_pandas(case, 3)
assert eq(ddf.index, case.index)
assert repr(ddf.index).startswith('dd.Index')
assert raises(AttributeError, lambda: ddf.index.index)
def test_attributes():
assert 'a' in dir(d)
assert 'foo' not in dir(d)
assert raises(AttributeError, lambda: d.foo)
def test_column_names():
assert d.columns == ('a', 'b')
assert d[['b', 'a']].columns == ('b', 'a')
assert d['a'].columns == ('a',)
assert (d['a'] + 1).columns == ('a',)
assert (d['a'] + d['b']).columns == (None,)
def test_set_index():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 2, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 5, 8]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [9, 1, 8]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
d2 = d.set_index('b', npartitions=3)
assert d2.npartitions == 3
assert eq(d2, full.set_index('b'))
d3 = d.set_index(d.b, npartitions=3)
assert d3.npartitions == 3
assert eq(d3, full.set_index(full.b))
d4 = d.set_index('b')
assert eq(d4, full.set_index('b'))
def test_set_index_raises_error_on_bad_input():
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
assert raises(NotImplementedError, lambda: ddf.set_index(['a', 'b']))
def test_split_apply_combine_on_series():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 6], 'b': [4, 2, 7]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 4, 6], 'b': [3, 3, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [4, 3, 7], 'b': [1, 1, 3]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
for ddkey, pdkey in [('b', 'b'), (d.b, full.b),
(d.b + 1, full.b + 1)]:
assert eq(d.groupby(ddkey).a.min(), full.groupby(pdkey).a.min())
assert eq(d.groupby(ddkey).a.max(), full.groupby(pdkey).a.max())
assert eq(d.groupby(ddkey).a.count(), full.groupby(pdkey).a.count())
assert eq(d.groupby(ddkey).a.mean(), full.groupby(pdkey).a.mean())
assert eq(d.groupby(ddkey).a.nunique(), full.groupby(pdkey).a.nunique())
assert eq(d.groupby(ddkey).sum(), full.groupby(pdkey).sum())
assert eq(d.groupby(ddkey).min(), full.groupby(pdkey).min())
assert eq(d.groupby(ddkey).max(), full.groupby(pdkey).max())
assert eq(d.groupby(ddkey).count(), full.groupby(pdkey).count())
assert eq(d.groupby(ddkey).mean(), full.groupby(pdkey).mean())
for ddkey, pdkey in [(d.b, full.b), (d.b + 1, full.b + 1)]:
assert eq(d.a.groupby(ddkey).sum(), full.a.groupby(pdkey).sum(), check_names=False)
assert eq(d.a.groupby(ddkey).max(), full.a.groupby(pdkey).max(), check_names=False)
assert eq(d.a.groupby(ddkey).count(), full.a.groupby(pdkey).count(), check_names=False)
assert eq(d.a.groupby(ddkey).mean(), full.a.groupby(pdkey).mean(), check_names=False)
assert eq(d.a.groupby(ddkey).nunique(), full.a.groupby(pdkey).nunique(), check_names=False)
for i in range(8):
assert eq(d.groupby(d.b > i).a.sum(), full.groupby(full.b > i).a.sum())
assert eq(d.groupby(d.b > i).a.min(), full.groupby(full.b > i).a.min())
assert eq(d.groupby(d.b > i).a.max(), full.groupby(full.b > i).a.max())
assert eq(d.groupby(d.b > i).a.count(), full.groupby(full.b > i).a.count())
assert eq(d.groupby(d.b > i).a.mean(), full.groupby(full.b > i).a.mean())
assert eq(d.groupby(d.b > i).a.nunique(), full.groupby(full.b > i).a.nunique())
assert eq(d.groupby(d.a > i).b.sum(), full.groupby(full.a > i).b.sum())
assert eq(d.groupby(d.a > i).b.min(), full.groupby(full.a > i).b.min())
assert eq(d.groupby(d.a > i).b.max(), full.groupby(full.a > i).b.max())
assert eq(d.groupby(d.a > i).b.count(), full.groupby(full.a > i).b.count())
assert eq(d.groupby(d.a > i).b.mean(), full.groupby(full.a > i).b.mean())
assert eq(d.groupby(d.a > i).b.nunique(), full.groupby(full.a > i).b.nunique())
assert eq(d.groupby(d.b > i).sum(), full.groupby(full.b > i).sum())
assert eq(d.groupby(d.b > i).min(), full.groupby(full.b > i).min())
assert eq(d.groupby(d.b > i).max(), full.groupby(full.b > i).max())
assert eq(d.groupby(d.b > i).count(), full.groupby(full.b > i).count())
assert eq(d.groupby(d.b > i).mean(), full.groupby(full.b > i).mean())
assert eq(d.groupby(d.a > i).sum(), full.groupby(full.a > i).sum())
assert eq(d.groupby(d.a > i).min(), full.groupby(full.a > i).min())
assert eq(d.groupby(d.a > i).max(), full.groupby(full.a > i).max())
assert eq(d.groupby(d.a > i).count(), full.groupby(full.a > i).count())
assert eq(d.groupby(d.a > i).mean(), full.groupby(full.a > i).mean())
for ddkey, pdkey in [('a', 'a'), (d.a, full.a),
(d.a + 1, full.a + 1), (d.a > 3, full.a > 3)]:
assert eq(d.groupby(ddkey).b.sum(), full.groupby(pdkey).b.sum())
assert eq(d.groupby(ddkey).b.min(), full.groupby(pdkey).b.min())
assert eq(d.groupby(ddkey).b.max(), full.groupby(pdkey).b.max())
assert eq(d.groupby(ddkey).b.count(), full.groupby(pdkey).b.count())
assert eq(d.groupby(ddkey).b.mean(), full.groupby(pdkey).b.mean())
assert eq(d.groupby(ddkey).b.nunique(), full.groupby(pdkey).b.nunique())
assert eq(d.groupby(ddkey).sum(), full.groupby(pdkey).sum())
assert eq(d.groupby(ddkey).min(), full.groupby(pdkey).min())
assert eq(d.groupby(ddkey).max(), full.groupby(pdkey).max())
assert eq(d.groupby(ddkey).count(), full.groupby(pdkey).count())
assert eq(d.groupby(ddkey).mean(), full.groupby(pdkey).mean().astype(float))
assert sorted(d.groupby('b').a.sum().dask) == \
sorted(d.groupby('b').a.sum().dask)
assert sorted(d.groupby(d.a > 3).b.mean().dask) == \
sorted(d.groupby(d.a > 3).b.mean().dask)
# test raises with incorrect key
assert raises(KeyError, lambda: d.groupby('x'))
assert raises(KeyError, lambda: d.groupby(['a', 'x']))
assert raises(KeyError, lambda: d.groupby('a')['x'])
assert raises(KeyError, lambda: d.groupby('a')['b', 'x'])
assert raises(KeyError, lambda: d.groupby('a')[['b', 'x']])
# test graph node labels
assert_dask_graph(d.groupby('b').a.sum(), 'series-groupby-sum')
assert_dask_graph(d.groupby('b').a.min(), 'series-groupby-min')
assert_dask_graph(d.groupby('b').a.max(), 'series-groupby-max')
assert_dask_graph(d.groupby('b').a.count(), 'series-groupby-count')
# mean is computed from sum and count operations
assert_dask_graph(d.groupby('b').a.mean(), 'series-groupby-sum')
assert_dask_graph(d.groupby('b').a.mean(), 'series-groupby-count')
assert_dask_graph(d.groupby('b').a.nunique(), 'series-groupby-nunique')
assert_dask_graph(d.groupby('b').sum(), 'dataframe-groupby-sum')
assert_dask_graph(d.groupby('b').min(), 'dataframe-groupby-min')
assert_dask_graph(d.groupby('b').max(), 'dataframe-groupby-max')
assert_dask_graph(d.groupby('b').count(), 'dataframe-groupby-count')
# mean is computed from sum and count operations
assert_dask_graph(d.groupby('b').mean(), 'dataframe-groupby-sum')
assert_dask_graph(d.groupby('b').mean(), 'dataframe-groupby-count')
def test_groupby_multilevel_getitem():
df = pd.DataFrame({'a': [1, 2, 3, 1, 2, 3],
'b': [1, 2, 1, 4, 2, 1],
'c': [1, 3, 2, 1, 1, 2],
'd': [1, 2, 1, 1, 2, 2]})
ddf = dd.from_pandas(df, 2)
cases = [(ddf.groupby('a')['b'], df.groupby('a')['b']),
(ddf.groupby(['a', 'b']), df.groupby(['a', 'b'])),
(ddf.groupby(['a', 'b'])['c'], df.groupby(['a', 'b'])['c']),
(ddf.groupby('a')[['b', 'c']], df.groupby('a')[['b', 'c']]),
(ddf.groupby('a')[['b']], df.groupby('a')[['b']]),
(ddf.groupby(['a', 'b', 'c']), df.groupby(['a', 'b', 'c']))]
for d, p in cases:
assert isinstance(d, dd.core._GroupBy)
assert isinstance(p, pd.core.groupby.GroupBy)
assert eq(d.sum(), p.sum())
assert eq(d.min(), p.min())
assert eq(d.max(), p.max())
assert eq(d.count(), p.count())
assert eq(d.mean(), p.mean().astype(float))
def test_groupby_get_group():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 6], 'b': [4, 2, 7]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 2, 6], 'b': [3, 3, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [4, 3, 7], 'b': [1, 1, 3]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
for ddkey, pdkey in [('b', 'b'), (d.b, full.b),
(d.b + 1, full.b + 1)]:
ddgrouped = d.groupby(ddkey)
pdgrouped = full.groupby(pdkey)
# DataFrame
assert eq(ddgrouped.get_group(2), pdgrouped.get_group(2))
assert eq(ddgrouped.get_group(3), pdgrouped.get_group(3))
# Series
assert eq(ddgrouped.a.get_group(3), pdgrouped.a.get_group(3))
assert eq(ddgrouped.a.get_group(2), pdgrouped.a.get_group(2))
def test_arithmetics():
pdf2 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': [5, 6, 7, 8, 1, 2, 3, 4]})
pdf3 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
'b': [2, 4, 5, 3, 4, 2, 1, 0]})
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
dsk4 = {('y', 0): pd.DataFrame({'a': [3, 2, 1], 'b': [7, 8, 9]},
index=[0, 1, 3]),
('y', 1): pd.DataFrame({'a': [5, 2, 8], 'b': [4, 2, 3]},
index=[5, 6, 8]),
('y', 2): pd.DataFrame({'a': [1, 4, 10], 'b': [1, 0, 5]},
index=[9, 9, 9])}
ddf4 = dd.DataFrame(dsk4, 'y', ['a', 'b'], [0, 4, 9, 9])
pdf4 = ddf4.compute()
# Arithmetics
cases = [(d, d, full, full),
(d, d.repartition([0, 1, 3, 6, 9]), full, full),
(ddf2, ddf3, pdf2, pdf3),
(ddf2.repartition([0, 3, 6, 7]), ddf3.repartition([0, 7]),
pdf2, pdf3),
(ddf2.repartition([0, 7]), ddf3.repartition([0, 2, 4, 5, 7]),
pdf2, pdf3),
(d, ddf4, full, pdf4),
(d, ddf4.repartition([0, 9]), full, pdf4),
(d.repartition([0, 3, 9]), ddf4.repartition([0, 5, 9]),
full, pdf4),
# dask + pandas
(d, pdf4, full, pdf4), (ddf2, pdf3, pdf2, pdf3)]
for (l, r, el, er) in cases:
check_series_arithmetics(l.a, r.b, el.a, er.b)
check_frame_arithmetics(l, r, el, er)
# different index, pandas raises ValueError in comparison ops
pdf5 = pd.DataFrame({'a': [3, 2, 1, 5, 2, 8, 1, 4, 10],
'b': [7, 8, 9, 4, 2, 3, 1, 0, 5]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
ddf5 = dd.from_pandas(pdf5, 2)
pdf6 = pd.DataFrame({'a': [3, 2, 1, 5, 2, 8, 1, 4, 10],
'b': [7, 8, 9, 5, 7, 8, 4, 2, 5]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 9])
ddf6 = dd.from_pandas(pdf6, 4)
pdf7 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': [5, 6, 7, 8, 1, 2, 3, 4]},
index=list('aaabcdeh'))
pdf8 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
'b': [2, 4, 5, 3, 4, 2, 1, 0]},
index=list('abcdefgh'))
ddf7 = dd.from_pandas(pdf7, 3)
ddf8 = dd.from_pandas(pdf8, 4)
pdf9 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': [5, 6, 7, 8, 1, 2, 3, 4],
'c': [5, 6, 7, 8, 1, 2, 3, 4]},
index=list('aaabcdeh'))
pdf10 = pd.DataFrame({'b': [5, 6, 7, 8, 4, 3, 2, 1],
'c': [2, 4, 5, 3, 4, 2, 1, 0],
'd': [2, 4, 5, 3, 4, 2, 1, 0]},
index=list('abcdefgh'))
ddf9 = dd.from_pandas(pdf9, 3)
ddf10 = dd.from_pandas(pdf10, 4)
# Arithmetics with different index
cases = [(ddf5, ddf6, pdf5, pdf6),
(ddf5.repartition([0, 9]), ddf6, pdf5, pdf6),
(ddf5.repartition([0, 5, 9]), ddf6.repartition([0, 7, 9]),
pdf5, pdf6),
(ddf7, ddf8, pdf7, pdf8),
(ddf7.repartition(['a', 'c', 'h']), ddf8.repartition(['a', 'h']),
pdf7, pdf8),
(ddf7.repartition(['a', 'b', 'e', 'h']),
ddf8.repartition(['a', 'e', 'h']), pdf7, pdf8),
(ddf9, ddf10, pdf9, pdf10),
(ddf9.repartition(['a', 'c', 'h']), ddf10.repartition(['a', 'h']),
pdf9, pdf10),
# dask + pandas
(ddf5, pdf6, pdf5, pdf6), (ddf7, pdf8, pdf7, pdf8),
(ddf9, pdf10, pdf9, pdf10)]
for (l, r, el, er) in cases:
check_series_arithmetics(l.a, r.b, el.a, er.b,
allow_comparison_ops=False)
check_frame_arithmetics(l, r, el, er,
allow_comparison_ops=False)
def test_arithmetics_different_index():
# the indexes are different, but overlap
pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [3, 5, 2, 5, 7]},
index=[1, 2, 3, 4, 5])
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({'a': [3, 2, 6, 7, 8], 'b': [9, 4, 2, 6, 2]},
index=[3, 4, 5, 6, 7])
ddf2 = dd.from_pandas(pdf2, 2)
# the indexes do not overlap
pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [3, 5, 2, 5, 7]},
index=[1, 2, 3, 4, 5])
ddf3 = dd.from_pandas(pdf3, 2)
pdf4 = pd.DataFrame({'a': [3, 2, 6, 7, 8], 'b': [9, 4, 2, 6, 2]},
index=[10, 11, 12, 13, 14])
ddf4 = dd.from_pandas(pdf4, 2)
# one index is contained within the other
pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [3, 5, 2, 5, 7]},
index=[1, 3, 5, 7, 9])
ddf5 = dd.from_pandas(pdf5, 2)
pdf6 = pd.DataFrame({'a': [3, 2, 6, 7, 8], 'b': [9, 4, 2, 6, 2]},
index=[2, 3, 4, 5, 6])
ddf6 = dd.from_pandas(pdf6, 2)
cases = [(ddf1, ddf2, pdf1, pdf2),
(ddf2, ddf1, pdf2, pdf1),
(ddf1.repartition([1, 3, 5]), ddf2.repartition([3, 4, 7]),
pdf1, pdf2),
(ddf2.repartition([3, 4, 5, 7]), ddf1.repartition([1, 2, 4, 5]),
pdf2, pdf1),
(ddf3, ddf4, pdf3, pdf4),
(ddf4, ddf3, pdf4, pdf3),
(ddf3.repartition([1, 2, 3, 4, 5]),
ddf4.repartition([10, 11, 12, 13, 14]), pdf3, pdf4),
(ddf4.repartition([10, 14]), ddf3.repartition([1, 3, 4, 5]),
pdf4, pdf3),
(ddf5, ddf6, pdf5, pdf6),
(ddf6, ddf5, pdf6, pdf5),
(ddf5.repartition([1, 7, 8, 9]), ddf6.repartition([2, 3, 4, 6]),
pdf5, pdf6),
(ddf6.repartition([2, 6]), ddf5.repartition([1, 3, 7, 9]),
pdf6, pdf5),
# dask + pandas
(ddf1, pdf2, pdf1, pdf2), (ddf2, pdf1, pdf2, pdf1),
(ddf3, pdf4, pdf3, pdf4), (ddf4, pdf3, pdf4, pdf3),
(ddf5, pdf6, pdf5, pdf6), (ddf6, pdf5, pdf6, pdf5)]
for (l, r, el, er) in cases:
check_series_arithmetics(l.a, r.b, el.a, er.b,
allow_comparison_ops=False)
check_frame_arithmetics(l, r, el, er,
allow_comparison_ops=False)
pdf7 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': [5, 6, 7, 8, 1, 2, 3, 4]},
index=[0, 2, 4, 8, 9, 10, 11, 13])
pdf8 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
'b': [2, 4, 5, 3, 4, 2, 1, 0]},
index=[1, 3, 4, 8, 9, 11, 12, 13])
ddf7 = dd.from_pandas(pdf7, 3)
ddf8 = dd.from_pandas(pdf8, 2)
pdf9 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': [5, 6, 7, 8, 1, 2, 3, 4]},
index=[0, 2, 4, 8, 9, 10, 11, 13])
pdf10 = pd.DataFrame({'a': [5, 6, 7, 8, 4, 3, 2, 1],
'b': [2, 4, 5, 3, 4, 2, 1, 0]},
index=[0, 3, 4, 8, 9, 11, 12, 13])
ddf9 = dd.from_pandas(pdf9, 3)
ddf10 = dd.from_pandas(pdf10, 2)
cases = [(ddf7, ddf8, pdf7, pdf8),
(ddf8, ddf7, pdf8, pdf7),
(ddf7.repartition([0, 13]),
ddf8.repartition([0, 4, 11, 14], force=True),
pdf7, pdf8),
(ddf8.repartition([-5, 10, 15], force=True),
ddf7.repartition([-1, 4, 11, 14], force=True), pdf8, pdf7),
(ddf7.repartition([0, 8, 12, 13]),
ddf8.repartition([0, 2, 8, 12, 13], force=True), pdf7, pdf8),
(ddf8.repartition([-5, 0, 10, 20], force=True),
ddf7.repartition([-1, 4, 11, 13], force=True), pdf8, pdf7),
(ddf9, ddf10, pdf9, pdf10),
(ddf10, ddf9, pdf10, pdf9),
# dask + pandas
(ddf7, pdf8, pdf7, pdf8), (ddf8, pdf7, pdf8, pdf7),
(ddf9, pdf10, pdf9, pdf10), (ddf10, pdf9, pdf10, pdf9)]
for (l, r, el, er) in cases:
check_series_arithmetics(l.a, r.b, el.a, er.b,
allow_comparison_ops=False)
check_frame_arithmetics(l, r, el, er,
allow_comparison_ops=False)
def check_series_arithmetics(l, r, el, er, allow_comparison_ops=True):
assert isinstance(l, dd.Series)
assert isinstance(r, (dd.Series, pd.Series))
assert isinstance(el, pd.Series)
assert isinstance(er, pd.Series)
# l, r may be repartitioned, test whether repartition keeps original data
assert eq(l, el)
assert eq(r, er)
assert eq(l + r, el + er)
assert eq(l * r, el * er)
assert eq(l - r, el - er)
assert eq(l / r, el / er)
assert eq(l // r, el // er)
assert eq(l ** r, el ** er)
assert eq(l % r, el % er)
if allow_comparison_ops:
# comparison is allowed if data have same index
assert eq(l & r, el & er)
assert eq(l | r, el | er)
assert eq(l ^ r, el ^ er)
assert eq(l > r, el > er)
assert eq(l < r, el < er)
assert eq(l >= r, el >= er)
assert eq(l <= r, el <= er)
assert eq(l == r, el == er)
assert eq(l != r, el != er)
assert eq(l + 2, el + 2)
assert eq(l * 2, el * 2)
assert eq(l - 2, el - 2)
assert eq(l / 2, el / 2)
assert eq(l & True, el & True)
assert eq(l | True, el | True)
assert eq(l ^ True, el ^ True)
assert eq(l // 2, el // 2)
assert eq(l ** 2, el ** 2)
assert eq(l % 2, el % 2)
assert eq(l > 2, el > 2)
assert eq(l < 2, el < 2)
assert eq(l >= 2, el >= 2)
assert eq(l <= 2, el <= 2)
assert eq(l == 2, el == 2)
assert eq(l != 2, el != 2)
assert eq(2 + r, 2 + er)
assert eq(2 * r, 2 * er)
assert eq(2 - r, 2 - er)
assert eq(2 / r, 2 / er)
assert eq(True & r, True & er)
assert eq(True | r, True | er)
assert eq(True ^ r, True ^ er)
assert eq(2 // r, 2 // er)
assert eq(2 ** r, 2 ** er)
assert eq(2 % r, 2 % er)
assert eq(2 > r, 2 > er)
assert eq(2 < r, 2 < er)
assert eq(2 >= r, 2 >= er)
assert eq(2 <= r, 2 <= er)
assert eq(2 == r, 2 == er)
assert eq(2 != r, 2 != er)
assert eq(-l, -el)
assert eq(abs(l), abs(el))
if allow_comparison_ops:
# comparison is allowed if data have same index
assert eq(~(l == r), ~(el == er))
def check_frame_arithmetics(l, r, el, er, allow_comparison_ops=True):
assert isinstance(l, dd.DataFrame)
assert isinstance(r, (dd.DataFrame, pd.DataFrame))
assert isinstance(el, pd.DataFrame)
assert isinstance(er, pd.DataFrame)
# l, r may be repartitioned, test whether repartition keeps original data
assert eq(l, el)
assert eq(r, er)
assert eq(l + r, el + er)
assert eq(l * r, el * er)
assert eq(l - r, el - er)
assert eq(l / r, el / er)
assert eq(l // r, el // er)
assert eq(l ** r, el ** er)
assert eq(l % r, el % er)
if allow_comparison_ops:
# comparison is allowed if data have same index
assert eq(l & r, el & er)
assert eq(l | r, el | er)
assert eq(l ^ r, el ^ er)
assert eq(l > r, el > er)
assert eq(l < r, el < er)
assert eq(l >= r, el >= er)
assert eq(l <= r, el <= er)
assert eq(l == r, el == er)
assert eq(l != r, el != er)
assert eq(l + 2, el + 2)
assert eq(l * 2, el * 2)
assert eq(l - 2, el - 2)
assert eq(l / 2, el / 2)
assert eq(l & True, el & True)
assert eq(l | True, el | True)
assert eq(l ^ True, el ^ True)
assert eq(l // 2, el // 2)
assert eq(l ** 2, el ** 2)
assert eq(l % 2, el % 2)
assert eq(l > 2, el > 2)
assert eq(l < 2, el < 2)
assert eq(l >= 2, el >= 2)
assert eq(l <= 2, el <= 2)
assert eq(l == 2, el == 2)
assert eq(l != 2, el != 2)
assert eq(2 + l, 2 + el)
assert eq(2 * l, 2 * el)
assert eq(2 - l, 2 - el)
assert eq(2 / l, 2 / el)
assert eq(True & l, True & el)
assert eq(True | l, True | el)
assert eq(True ^ l, True ^ el)
assert eq(2 // l, 2 // el)
assert eq(2 ** l, 2 ** el)
assert eq(2 % l, 2 % el)
assert eq(2 > l, 2 > el)
assert eq(2 < l, 2 < el)
assert eq(2 >= l, 2 >= el)
assert eq(2 <= l, 2 <= el)
assert eq(2 == l, 2 == el)
assert eq(2 != l, 2 != el)
assert eq(-l, -el)
assert eq(abs(l), abs(el))
if allow_comparison_ops:
# comparison is allowed if data have same index
assert eq(~(l == r), ~(el == er))
def test_scalar_arithmetics():
l = dd.core.Scalar({('l', 0): 10}, 'l')
r = dd.core.Scalar({('r', 0): 4}, 'r')
el = 10
er = 4
assert isinstance(l, dd.core.Scalar)
assert isinstance(r, dd.core.Scalar)
# l, r may be repartitioned, test whether repartition keeps original data
assert eq(l, el)
assert eq(r, er)
assert eq(l + r, el + er)
assert eq(l * r, el * er)
assert eq(l - r, el - er)
assert eq(l / r, el / er)
assert eq(l // r, el // er)
assert eq(l ** r, el ** er)
assert eq(l % r, el % er)
assert eq(l & r, el & er)
assert eq(l | r, el | er)
assert eq(l ^ r, el ^ er)
assert eq(l > r, el > er)
assert eq(l < r, el < er)
assert eq(l >= r, el >= er)
assert eq(l <= r, el <= er)
assert eq(l == r, el == er)
assert eq(l != r, el != er)
assert eq(l + 2, el + 2)
assert eq(l * 2, el * 2)
assert eq(l - 2, el - 2)
assert eq(l / 2, el / 2)
assert eq(l & True, el & True)
assert eq(l | True, el | True)
assert eq(l ^ True, el ^ True)
assert eq(l // 2, el // 2)
assert eq(l ** 2, el ** 2)
assert eq(l % 2, el % 2)
assert eq(l > 2, el > 2)
assert eq(l < 2, el < 2)
assert eq(l >= 2, el >= 2)
assert eq(l <= 2, el <= 2)
assert eq(l == 2, el == 2)
assert eq(l != 2, el != 2)
assert eq(2 + r, 2 + er)
assert eq(2 * r, 2 * er)
assert eq(2 - r, 2 - er)
assert eq(2 / r, 2 / er)
assert eq(True & r, True & er)
assert eq(True | r, True | er)
assert eq(True ^ r, True ^ er)
assert eq(2 // r, 2 // er)
assert eq(2 ** r, 2 ** er)
assert eq(2 % r, 2 % er)
assert eq(2 > r, 2 > er)
assert eq(2 < r, 2 < er)
assert eq(2 >= r, 2 >= er)
assert eq(2 <= r, 2 <= er)
assert eq(2 == r, 2 == er)
assert eq(2 != r, 2 != er)
assert eq(-l, -el)
assert eq(abs(l), abs(el))
assert eq(~(l == r), ~(el == er))
def test_scalar_arithmetics_with_dask_instances():
s = dd.core.Scalar({('s', 0): 10}, 's')
e = 10
pds = pd.Series([1, 2, 3, 4, 5, 6, 7])
dds = dd.from_pandas(pds, 2)
pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(pdf, 2)
# pandas Series
result = pds + s # this results in a pd.Series (automatically computed)
assert isinstance(result, pd.Series)
assert eq(result, pds + e)
result = s + pds # this results in a dd.Series
assert isinstance(result, dd.Series)
assert eq(result, pds + e)
# dask Series
result = dds + s # this results in a dd.Series
assert isinstance(result, dd.Series)
assert eq(result, pds + e)
result = s + dds # this results in a dd.Series
assert isinstance(result, dd.Series)
assert eq(result, pds + e)
# pandas DataFrame
result = pdf + s # this results in a pd.DataFrame (automatically computed)
assert isinstance(result, pd.DataFrame)
assert eq(result, pdf + e)
result = s + pdf # this results in a dd.DataFrame
assert isinstance(result, dd.DataFrame)
assert eq(result, pdf + e)
# dask DataFrame
result = ddf + s # this results in a dd.DataFrame
assert isinstance(result, dd.DataFrame)
assert eq(result, pdf + e)
result = s + ddf # this results in a dd.DataFrame
assert isinstance(result, dd.DataFrame)
assert eq(result, pdf + e)
def test_frame_series_arithmetic_methods():
pdf1 = pd.DataFrame({'A': np.arange(10),
'B': [np.nan, 1, 2, 3, 4] * 2,
'C': [np.nan] * 10,
'D': np.arange(10)},
index=list('abcdefghij'), columns=list('ABCD'))
pdf2 = pd.DataFrame(np.random.randn(10, 4),
index=list('abcdefghjk'), columns=list('ABCX'))
ps1 = pdf1.A
ps2 = pdf2.A
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 2)
ds1 = ddf1.A
ds2 = ddf2.A
s = dd.core.Scalar({('s', 0): 4}, 's')
for l, r, el, er in [(ddf1, ddf2, pdf1, pdf2), (ds1, ds2, ps1, ps2),
(ddf1.repartition(['a', 'f', 'j']), ddf2, pdf1, pdf2),
(ds1.repartition(['a', 'b', 'f', 'j']), ds2, ps1, ps2),
(ddf1, ddf2.repartition(['a', 'k']), pdf1, pdf2),
(ds1, ds2.repartition(['a', 'b', 'd', 'h', 'k']), ps1, ps2),
(ddf1, 3, pdf1, 3), (ds1, 3, ps1, 3),
(ddf1, s, pdf1, 4), (ds1, s, ps1, 4)]:
# l, r may be repartitioned, test whether repartition keeps original data
assert eq(l, el)
assert eq(r, er)
assert eq(l.add(r, fill_value=0), el.add(er, fill_value=0))
assert eq(l.sub(r, fill_value=0), el.sub(er, fill_value=0))
assert eq(l.mul(r, fill_value=0), el.mul(er, fill_value=0))
assert eq(l.div(r, fill_value=0), el.div(er, fill_value=0))
assert eq(l.truediv(r, fill_value=0), el.truediv(er, fill_value=0))
assert eq(l.floordiv(r, fill_value=1), el.floordiv(er, fill_value=1))
assert eq(l.mod(r, fill_value=0), el.mod(er, fill_value=0))
assert eq(l.pow(r, fill_value=0), el.pow(er, fill_value=0))
assert eq(l.radd(r, fill_value=0), el.radd(er, fill_value=0))
assert eq(l.rsub(r, fill_value=0), el.rsub(er, fill_value=0))
assert eq(l.rmul(r, fill_value=0), el.rmul(er, fill_value=0))
assert eq(l.rdiv(r, fill_value=0), el.rdiv(er, fill_value=0))
assert eq(l.rtruediv(r, fill_value=0), el.rtruediv(er, fill_value=0))
assert eq(l.rfloordiv(r, fill_value=1), el.rfloordiv(er, fill_value=1))
assert eq(l.rmod(r, fill_value=0), el.rmod(er, fill_value=0))
assert eq(l.rpow(r, fill_value=0), el.rpow(er, fill_value=0))
for l, r, el, er in [(ddf1, ds2, pdf1, ps2), (ddf1, ddf2.X, pdf1, pdf2.X)]:
assert eq(l, el)
assert eq(r, er)
# must specify axis=0 to add Series to each column
# axis=1 is not supported (add to each row)
assert eq(l.add(r, axis=0), el.add(er, axis=0))
assert eq(l.sub(r, axis=0), el.sub(er, axis=0))
assert eq(l.mul(r, axis=0), el.mul(er, axis=0))
assert eq(l.div(r, axis=0), el.div(er, axis=0))
assert eq(l.truediv(r, axis=0), el.truediv(er, axis=0))
assert eq(l.floordiv(r, axis=0), el.floordiv(er, axis=0))
assert eq(l.mod(r, axis=0), el.mod(er, axis=0))
assert eq(l.pow(r, axis=0), el.pow(er, axis=0))
assert eq(l.radd(r, axis=0), el.radd(er, axis=0))
assert eq(l.rsub(r, axis=0), el.rsub(er, axis=0))
assert eq(l.rmul(r, axis=0), el.rmul(er, axis=0))
assert eq(l.rdiv(r, axis=0), el.rdiv(er, axis=0))
assert eq(l.rtruediv(r, axis=0), el.rtruediv(er, axis=0))
assert eq(l.rfloordiv(r, axis=0), el.rfloordiv(er, axis=0))
assert eq(l.rmod(r, axis=0), el.rmod(er, axis=0))
assert eq(l.rpow(r, axis=0), el.rpow(er, axis=0))
assert raises(ValueError, lambda: l.add(r, axis=1))
for l, r, el, er in [(ddf1, pdf2, pdf1, pdf2), (ddf1, ps2, pdf1, ps2)]:
assert eq(l, el)
assert eq(r, er)
for axis in [0, 1, 'index', 'columns']:
assert eq(l.add(r, axis=axis), el.add(er, axis=axis))
assert eq(l.sub(r, axis=axis), el.sub(er, axis=axis))
assert eq(l.mul(r, axis=axis), el.mul(er, axis=axis))
assert eq(l.div(r, axis=axis), el.div(er, axis=axis))
assert eq(l.truediv(r, axis=axis), el.truediv(er, axis=axis))
assert eq(l.floordiv(r, axis=axis), el.floordiv(er, axis=axis))
assert eq(l.mod(r, axis=axis), el.mod(er, axis=axis))
assert eq(l.pow(r, axis=axis), el.pow(er, axis=axis))
assert eq(l.radd(r, axis=axis), el.radd(er, axis=axis))
assert eq(l.rsub(r, axis=axis), el.rsub(er, axis=axis))
assert eq(l.rmul(r, axis=axis), el.rmul(er, axis=axis))
assert eq(l.rdiv(r, axis=axis), el.rdiv(er, axis=axis))
assert eq(l.rtruediv(r, axis=axis), el.rtruediv(er, axis=axis))
assert eq(l.rfloordiv(r, axis=axis), el.rfloordiv(er, axis=axis))
assert eq(l.rmod(r, axis=axis), el.rmod(er, axis=axis))
assert eq(l.rpow(r, axis=axis), el.rpow(er, axis=axis))
def test_reductions():
nans1 = pd.Series([1] + [np.nan] * 4 + [2] + [np.nan] * 3)
nands1 = dd.from_pandas(nans1, 2)
nans2 = pd.Series([1] + [np.nan] * 8)
nands2 = dd.from_pandas(nans2, 2)
nans3 = pd.Series([np.nan] * 9)
nands3 = dd.from_pandas(nans3, 2)
bools = pd.Series([True, False, True, False, True], dtype=bool)
boolds = dd.from_pandas(bools, 2)
for dds, pds in [(d.b, full.b), (d.a, full.a),
(d['a'], full['a']), (d['b'], full['b']),
(nands1, nans1), (nands2, nans2), (nands3, nans3),
(boolds, bools)]:
assert isinstance(dds, dd.Series)
assert isinstance(pds, pd.Series)
assert eq(dds.sum(), pds.sum())
assert eq(dds.min(), pds.min())
assert eq(dds.max(), pds.max())
assert eq(dds.count(), pds.count())
assert eq(dds.std(), pds.std())
assert eq(dds.var(), pds.var())
assert eq(dds.std(ddof=0), pds.std(ddof=0))
assert eq(dds.var(ddof=0), pds.var(ddof=0))
assert eq(dds.mean(), pds.mean())
assert eq(dds.nunique(), pds.nunique())
assert eq(dds.nbytes, pds.nbytes)
assert_dask_graph(d.b.sum(), 'series-sum')
assert_dask_graph(d.b.min(), 'series-min')
assert_dask_graph(d.b.max(), 'series-max')
assert_dask_graph(d.b.count(), 'series-count')
assert_dask_graph(d.b.std(), 'series-std(ddof=1)')
assert_dask_graph(d.b.var(), 'series-var(ddof=1)')
assert_dask_graph(d.b.std(ddof=0), 'series-std(ddof=0)')
assert_dask_graph(d.b.var(ddof=0), 'series-var(ddof=0)')
assert_dask_graph(d.b.mean(), 'series-mean')
# nunique is performed using drop-duplicates
assert_dask_graph(d.b.nunique(), 'drop-duplicates')
def test_reduction_series_invalid_axis():
for axis in [1, 'columns']:
for s in [d.a, full.a]: # both must behave the same
assert raises(ValueError, lambda: s.sum(axis=axis))
assert raises(ValueError, lambda: s.min(axis=axis))
assert raises(ValueError, lambda: s.max(axis=axis))
            # only count doesn't have an axis keyword
assert raises(TypeError, lambda: s.count(axis=axis))
assert raises(ValueError, lambda: s.std(axis=axis))
assert raises(ValueError, lambda: s.var(axis=axis))
assert raises(ValueError, lambda: s.mean(axis=axis))
def test_reductions_non_numeric_dtypes():
    # test non-numeric blocks
def check_raises(d, p, func):
assert raises((TypeError, ValueError),
lambda: getattr(d, func)().compute())
assert raises((TypeError, ValueError),
lambda: getattr(p, func)())
pds = pd.Series(['a', 'b', 'c', 'd', 'e'])
dds = dd.from_pandas(pds, 2)
assert eq(dds.sum(), pds.sum())
assert eq(dds.min(), pds.min())
assert eq(dds.max(), pds.max())
assert eq(dds.count(), pds.count())
check_raises(dds, pds, 'std')
check_raises(dds, pds, 'var')
check_raises(dds, pds, 'mean')
assert eq(dds.nunique(), pds.nunique())
for pds in [pd.Series(pd.Categorical([1, 2, 3, 4, 5], ordered=True)),
pd.Series(pd.Categorical(list('abcde'), ordered=True)),
pd.Series(pd.date_range('2011-01-01', freq='D', periods=5))]:
dds = dd.from_pandas(pds, 2)
check_raises(dds, pds, 'sum')
assert eq(dds.min(), pds.min())
assert eq(dds.max(), pds.max())
assert eq(dds.count(), pds.count())
check_raises(dds, pds, 'std')
check_raises(dds, pds, 'var')
check_raises(dds, pds, 'mean')
assert eq(dds.nunique(), pds.nunique())
    pds = pd.Series(pd.timedelta_range('1 days', freq='D', periods=5))
dds = dd.from_pandas(pds, 2)
assert eq(dds.sum(), pds.sum())
assert eq(dds.min(), pds.min())
assert eq(dds.max(), pds.max())
assert eq(dds.count(), pds.count())
    # ToDo: pandas supports timedelta std, but dask currently raises:
    # incompatible type for a datetime/timedelta operation [__pow__]
# assert eq(dds.std(), pds.std())
# assert eq(dds.var(), pds.var())
    # ToDo: pandas supports timedelta mean, but dask currently raises:
    # TypeError: unsupported operand type(s) for *: 'float' and 'Timedelta'
# assert eq(dds.mean(), pds.mean())
assert eq(dds.nunique(), pds.nunique())
def test_reductions_frame():
assert eq(d.sum(), full.sum())
assert eq(d.min(), full.min())
assert eq(d.max(), full.max())
assert eq(d.count(), full.count())
assert eq(d.std(), full.std())
assert eq(d.var(), full.var())
assert eq(d.std(ddof=0), full.std(ddof=0))
assert eq(d.var(ddof=0), full.var(ddof=0))
assert eq(d.mean(), full.mean())
for axis in [0, 1, 'index', 'columns']:
assert eq(d.sum(axis=axis), full.sum(axis=axis))
assert eq(d.min(axis=axis), full.min(axis=axis))
assert eq(d.max(axis=axis), full.max(axis=axis))
assert eq(d.count(axis=axis), full.count(axis=axis))
assert eq(d.std(axis=axis), full.std(axis=axis))
assert eq(d.var(axis=axis), full.var(axis=axis))
assert eq(d.std(axis=axis, ddof=0), full.std(axis=axis, ddof=0))
assert eq(d.var(axis=axis, ddof=0), full.var(axis=axis, ddof=0))
assert eq(d.mean(axis=axis), full.mean(axis=axis))
assert raises(ValueError, lambda: d.sum(axis='incorrect').compute())
# axis=0
assert_dask_graph(d.sum(), 'dataframe-sum')
assert_dask_graph(d.min(), 'dataframe-min')
assert_dask_graph(d.max(), 'dataframe-max')
assert_dask_graph(d.count(), 'dataframe-count')
    # std, var and mean are built from sum and count operations
assert_dask_graph(d.std(), 'dataframe-sum')
assert_dask_graph(d.std(), 'dataframe-count')
assert_dask_graph(d.var(), 'dataframe-sum')
assert_dask_graph(d.var(), 'dataframe-count')
assert_dask_graph(d.mean(), 'dataframe-sum')
assert_dask_graph(d.mean(), 'dataframe-count')
# axis=1
assert_dask_graph(d.sum(axis=1), 'dataframe-sum(axis=1)')
assert_dask_graph(d.min(axis=1), 'dataframe-min(axis=1)')
assert_dask_graph(d.max(axis=1), 'dataframe-max(axis=1)')
assert_dask_graph(d.count(axis=1), 'dataframe-count(axis=1)')
assert_dask_graph(d.std(axis=1), 'dataframe-std(axis=1, ddof=1)')
assert_dask_graph(d.var(axis=1), 'dataframe-var(axis=1, ddof=1)')
assert_dask_graph(d.mean(axis=1), 'dataframe-mean(axis=1)')
def test_reductions_frame_dtypes():
df = pd.DataFrame({'int': [1, 2, 3, 4, 5, 6, 7, 8],
'float': [1., 2., 3., 4., np.nan, 6., 7., 8.],
'dt': [pd.NaT] + [datetime(2011, i, 1) for i in range(1, 8)],
'str': list('abcdefgh')})
ddf = dd.from_pandas(df, 3)
assert eq(df.sum(), ddf.sum())
assert eq(df.min(), ddf.min())
assert eq(df.max(), ddf.max())
assert eq(df.count(), ddf.count())
assert eq(df.std(), ddf.std())
assert eq(df.var(), ddf.var())
assert eq(df.std(ddof=0), ddf.std(ddof=0))
assert eq(df.var(ddof=0), ddf.var(ddof=0))
assert eq(df.mean(), ddf.mean())
assert eq(df._get_numeric_data(), ddf._get_numeric_data())
numerics = ddf[['int', 'float']]
assert numerics._get_numeric_data().dask == numerics.dask
def test_describe():
    # prepare a test case in which the approximate quantiles equal the exact values
s = pd.Series(list(range(20)) * 4)
df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20})
ds = dd.from_pandas(s, 4)
ddf = dd.from_pandas(df, 4)
assert eq(s.describe(), ds.describe())
assert eq(df.describe(), ddf.describe())
# remove string columns
df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20,
'c': list('abcd') * 20})
ddf = dd.from_pandas(df, 4)
assert eq(df.describe(), ddf.describe())
def test_cumulative():
pdf = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
ddf = dd.from_pandas(pdf, 5)
assert eq(ddf.cumsum(), pdf.cumsum())
assert eq(ddf.cumprod(), pdf.cumprod())
assert eq(ddf.cummin(), pdf.cummin())
assert eq(ddf.cummax(), pdf.cummax())
assert eq(ddf.cumsum(axis=1), pdf.cumsum(axis=1))
assert eq(ddf.cumprod(axis=1), pdf.cumprod(axis=1))
assert eq(ddf.cummin(axis=1), pdf.cummin(axis=1))
assert eq(ddf.cummax(axis=1), pdf.cummax(axis=1))
assert eq(ddf.a.cumsum(), pdf.a.cumsum())
assert eq(ddf.a.cumprod(), pdf.a.cumprod())
assert eq(ddf.a.cummin(), pdf.a.cummin())
assert eq(ddf.a.cummax(), pdf.a.cummax())
def test_dropna():
df = pd.DataFrame({'x': [np.nan, 2, 3, 4, np.nan, 6],
'y': [1, 2, np.nan, 4, np.nan, np.nan],
'z': [1, 2, 3, 4, np.nan, np.nan]},
index=[10, 20, 30, 40, 50, 60])
ddf = dd.from_pandas(df, 3)
assert eq(ddf.x.dropna(), df.x.dropna())
assert eq(ddf.y.dropna(), df.y.dropna())
assert eq(ddf.z.dropna(), df.z.dropna())
assert eq(ddf.dropna(), df.dropna())
assert eq(ddf.dropna(how='all'), df.dropna(how='all'))
assert eq(ddf.dropna(subset=['x']), df.dropna(subset=['x']))
assert eq(ddf.dropna(subset=['y', 'z']), df.dropna(subset=['y', 'z']))
assert eq(ddf.dropna(subset=['y', 'z'], how='all'),
df.dropna(subset=['y', 'z'], how='all'))
def test_where_mask():
pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3})
ddf2 = dd.from_pandas(pdf2, 2)
# different index
pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
ddf3 = dd.from_pandas(pdf3, 2)
pdf4 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
ddf4 = dd.from_pandas(pdf4, 2)
# different columns
pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [9, 4, 2, 6, 2, 3, 1, 6, 2],
'c': [5, 6, 7, 8, 9, 10, 11, 12, 13]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
ddf5 = dd.from_pandas(pdf5, 2)
pdf6 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3,
'd': [False] * 9,
'e': [True] * 9},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
ddf6 = dd.from_pandas(pdf6, 2)
cases = [(ddf1, ddf2, pdf1, pdf2),
(ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),
(ddf1, ddf4, pdf3, pdf4),
(ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]),
pdf3, pdf4),
(ddf5, ddf6, pdf5, pdf6),
(ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),
# use pd.DataFrame as cond
(ddf1, pdf2, pdf1, pdf2),
(ddf1, pdf4, pdf3, pdf4),
(ddf5, pdf6, pdf5, pdf6)]
for ddf, ddcond, pdf, pdcond in cases:
assert isinstance(ddf, dd.DataFrame)
assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))
assert isinstance(pdf, pd.DataFrame)
assert isinstance(pdcond, pd.DataFrame)
assert eq(ddf.where(ddcond), pdf.where(pdcond))
assert eq(ddf.mask(ddcond), pdf.mask(pdcond))
assert eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))
assert eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))
# ToDo: Should work on pandas 0.17
# https://github.com/pydata/pandas/pull/10283
# assert eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))
# assert eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))
assert eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))
assert eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))
assert eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))
assert eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))
def test_map_partitions_multi_argument():
assert eq(dd.map_partitions(lambda a, b: a + b, None, d.a, d.b),
full.a + full.b)
assert eq(dd.map_partitions(lambda a, b, c: a + b + c, None, d.a, d.b, 1),
full.a + full.b + 1)
def test_map_partitions():
assert eq(d.map_partitions(lambda df: df, columns=d.columns), full)
assert eq(d.map_partitions(lambda df: df), full)
result = d.map_partitions(lambda df: df.sum(axis=1), columns=None)
assert eq(result, full.sum(axis=1))
def test_map_partitions_names():
func = lambda x: x
assert sorted(dd.map_partitions(func, d.columns, d).dask) == \
sorted(dd.map_partitions(func, d.columns, d).dask)
assert sorted(dd.map_partitions(lambda x: x, d.columns, d, token=1).dask) == \
sorted(dd.map_partitions(lambda x: x, d.columns, d, token=1).dask)
func = lambda x, y: x
assert sorted(dd.map_partitions(func, d.columns, d, d).dask) == \
sorted(dd.map_partitions(func, d.columns, d, d).dask)
def test_map_partitions_column_info():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = dd.map_partitions(lambda x: x, a.columns, a)
assert b.columns == a.columns
assert eq(df, b)
b = dd.map_partitions(lambda x: x, a.x.name, a.x)
assert b.name == a.x.name
assert eq(df.x, b)
b = dd.map_partitions(lambda x: x, a.x.name, a.x)
assert b.name == a.x.name
assert eq(df.x, b)
b = dd.map_partitions(lambda df: df.x + df.y, None, a)
    assert b.name is None
assert isinstance(b, dd.Series)
b = dd.map_partitions(lambda df: df.x + 1, 'x', a)
assert isinstance(b, dd.Series)
assert b.name == 'x'
def test_map_partitions_method_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = a.map_partitions(lambda x: x)
assert isinstance(b, dd.DataFrame)
assert b.columns == a.columns
b = a.map_partitions(lambda df: df.x + 1, columns=None)
assert isinstance(b, dd.Series)
    assert b.name is None
b = a.map_partitions(lambda df: df.x + 1, columns='x')
assert isinstance(b, dd.Series)
assert b.name == 'x'
def test_map_partitions_keeps_kwargs_in_dict():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
def f(s, x=1):
return s + x
b = a.x.map_partitions(f, x=5)
assert "'x': 5" in str(b.dask)
eq(df.x + 5, b)
assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
def test_drop_duplicates():
assert eq(d.a.drop_duplicates(), full.a.drop_duplicates())
assert eq(d.drop_duplicates(), full.drop_duplicates())
assert eq(d.index.drop_duplicates(), full.index.drop_duplicates())
def test_drop_duplicates_subset():
df = pd.DataFrame({'x': [1, 2, 3, 1, 2, 3],
'y': ['a', 'a', 'b', 'b', 'c', 'c']})
ddf = dd.from_pandas(df, npartitions=2)
if pd.__version__ < '0.17':
kwargs = [{'take_last': False}, {'take_last': True}]
else:
kwargs = [{'keep': 'first'}, {'keep': 'last'}]
for kwarg in kwargs:
assert eq(df.x.drop_duplicates(**kwarg),
ddf.x.drop_duplicates(**kwarg))
for ss in [['x'], 'y', ['x', 'y']]:
assert eq(df.drop_duplicates(subset=ss, **kwarg),
ddf.drop_duplicates(subset=ss, **kwarg))
def test_full_groupby():
assert raises(Exception, lambda: d.groupby('does_not_exist'))
assert raises(Exception, lambda: d.groupby('a').does_not_exist)
assert 'b' in dir(d.groupby('a'))
def func(df):
df['b'] = df.b - df.b.mean()
return df
assert eq(d.groupby('a').apply(func), full.groupby('a').apply(func))
assert sorted(d.groupby('a').apply(func).dask) == \
sorted(d.groupby('a').apply(func).dask)
def test_groupby_on_index():
e = d.set_index('a')
efull = full.set_index('a')
assert eq(d.groupby('a').b.mean(), e.groupby(e.index).b.mean())
def func(df):
df.loc[:, 'b'] = df.b - df.b.mean()
return df
assert eq(d.groupby('a').apply(func).set_index('a'),
e.groupby(e.index).apply(func))
assert eq(d.groupby('a').apply(func), full.groupby('a').apply(func))
assert eq(d.groupby('a').apply(func).set_index('a'),
full.groupby('a').apply(func).set_index('a'))
assert eq(efull.groupby(efull.index).apply(func),
e.groupby(e.index).apply(func))
def test_set_partition():
d2 = d.set_partition('b', [0, 2, 9])
assert d2.divisions == (0, 2, 9)
expected = full.set_index('b')
assert eq(d2, expected)
def test_set_partition_compute():
d2 = d.set_partition('b', [0, 2, 9])
d3 = d.set_partition('b', [0, 2, 9], compute=True)
assert eq(d2, d3)
assert eq(d2, full.set_index('b'))
assert eq(d3, full.set_index('b'))
assert len(d2.dask) > len(d3.dask)
d4 = d.set_partition(d.b, [0, 2, 9])
d5 = d.set_partition(d.b, [0, 2, 9], compute=True)
exp = full.copy()
exp.index = exp.b
assert eq(d4, d5)
assert eq(d4, exp)
assert eq(d5, exp)
assert len(d4.dask) > len(d5.dask)
def test_get_division():
pdf = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde'))
ddf = dd.from_pandas(pdf, 3)
assert ddf.divisions == (0, 4, 8, 9)
# DataFrame
div1 = ddf.get_division(0)
assert isinstance(div1, dd.DataFrame)
eq(div1, pdf.loc[0:3])
div2 = ddf.get_division(1)
eq(div2, pdf.loc[4:7])
div3 = ddf.get_division(2)
eq(div3, pdf.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf)
# Series
div1 = ddf.a.get_division(0)
assert isinstance(div1, dd.Series)
eq(div1, pdf.a.loc[0:3])
div2 = ddf.a.get_division(1)
eq(div2, pdf.a.loc[4:7])
div3 = ddf.a.get_division(2)
eq(div3, pdf.a.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf.a)
assert raises(ValueError, lambda: ddf.get_division(-1))
assert raises(ValueError, lambda: ddf.get_division(3))
def test_categorize():
dsk = {('x', 0): pd.DataFrame({'a': ['Alice', 'Bob', 'Alice'],
'b': ['C', 'D', 'E']},
index=[0, 1, 2]),
('x', 1): pd.DataFrame({'a': ['Bob', 'Charlie', 'Charlie'],
'b': ['A', 'A', 'B']},
index=[3, 4, 5])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 3, 5])
full = d.compute()
c = d.categorize('a')
cfull = c.compute()
assert cfull.dtypes['a'] == 'category'
assert cfull.dtypes['b'] == 'O'
assert list(cfull.a.astype('O')) == list(full.a)
assert (d._get(c.dask, c._keys()[:1])[0].dtypes == cfull.dtypes).all()
assert (d.categorize().compute().dtypes == 'category').all()
def test_ndim():
assert (d.ndim == 2)
assert (d.a.ndim == 1)
assert (d.index.ndim == 1)
def test_dtype():
assert (d.dtypes == full.dtypes).all()
def test_cache():
d2 = d.cache()
assert all(task[0] == getitem for task in d2.dask.values())
assert eq(d2.a, d.a)
def test_value_counts():
df = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4]})
a = dd.from_pandas(df, npartitions=3)
result = a.x.value_counts()
expected = df.x.value_counts()
    # because of a pandas bug, value_counts doesn't preserve the name (fixed in 0.17)
# https://github.com/pydata/pandas/pull/10419
assert eq(result, expected, check_names=False)
def test_isin():
assert eq(d.a.isin([0, 1, 2]), full.a.isin([0, 1, 2]))
def test_len():
assert len(d) == len(full)
assert len(d.a) == len(full.a)
def test_quantile():
# series / multiple
result = d.b.quantile([.3, .7])
    exp = full.b.quantile([.3, .7])  # approximate result may differ from pandas
assert len(result) == 2
assert result.divisions == (.3, .7)
assert eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert result.iloc[0] == 0
assert 5 < result.iloc[1] < 6
# index
s = pd.Series(np.arange(10), index=np.arange(10))
ds = dd.from_pandas(s, 2)
result = ds.index.quantile([.3, .7])
exp = s.quantile([.3, .7])
assert len(result) == 2
assert result.divisions == (.3, .7)
assert eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert 1 < result.iloc[0] < 2
assert 7 < result.iloc[1] < 8
# series / single
result = d.b.quantile(.5)
    exp = full.b.quantile(.5)  # approximate result may differ from pandas
assert isinstance(result, dd.core.Scalar)
result = result.compute()
assert 4 < result < 6
def test_empty_quantile():
result = d.b.quantile([])
exp = full.b.quantile([])
assert result.divisions == (None, None)
# because of a pandas bug, name is not preserved
# https://github.com/pydata/pandas/pull/10881
assert result.name == 'b'
assert result.compute().name == 'b'
assert eq(result, exp, check_names=False)
def test_dataframe_quantile():
    # column X is used to test column order and the resulting divisions
df = pd.DataFrame({'A': np.arange(20),
'X': np.arange(20, 40),
'B': np.arange(10, 30),
'C': ['a', 'b', 'c', 'd'] * 5},
columns=['A', 'X', 'B', 'C'])
ddf = dd.from_pandas(df, 3)
result = ddf.quantile()
assert result.npartitions == 1
assert result.divisions == ('A', 'X')
result = result.compute()
assert isinstance(result, pd.Series)
tm.assert_index_equal(result.index, pd.Index(['A', 'X', 'B']))
assert (result > pd.Series([16, 36, 26], index=['A', 'X', 'B'])).all()
assert (result < pd.Series([17, 37, 27], index=['A', 'X', 'B'])).all()
result = ddf.quantile([0.25, 0.75])
assert result.npartitions == 1
assert result.divisions == (0.25, 0.75)
result = result.compute()
assert isinstance(result, pd.DataFrame)
tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))
tm.assert_index_equal(result.columns, pd.Index(['A', 'X', 'B']))
minexp = pd.DataFrame([[1, 21, 11], [17, 37, 27]],
index=[0.25, 0.75], columns=['A', 'X', 'B'])
assert (result > minexp).all().all()
maxexp = pd.DataFrame([[2, 22, 12], [18, 38, 28]],
index=[0.25, 0.75], columns=['A', 'X', 'B'])
assert (result < maxexp).all().all()
assert eq(ddf.quantile(axis=1), df.quantile(axis=1))
assert raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1))
def test_index():
assert eq(d.index, full.index)
def test_loc():
assert d.loc[3:8].divisions[0] == 3
assert d.loc[3:8].divisions[-1] == 8
assert d.loc[5].divisions == (5, 5)
assert eq(d.loc[5], full.loc[5])
assert eq(d.loc[3:8], full.loc[3:8])
assert eq(d.loc[:8], full.loc[:8])
assert eq(d.loc[3:], full.loc[3:])
assert eq(d.a.loc[5], full.a.loc[5])
assert eq(d.a.loc[3:8], full.a.loc[3:8])
assert eq(d.a.loc[:8], full.a.loc[:8])
assert eq(d.a.loc[3:], full.a.loc[3:])
assert raises(KeyError, lambda: d.loc[1000])
assert eq(d.loc[1000:], full.loc[1000:])
assert eq(d.loc[-2000:-1000], full.loc[-2000:-1000])
assert sorted(d.loc[5].dask) == sorted(d.loc[5].dask)
assert sorted(d.loc[5].dask) != sorted(d.loc[6].dask)
def test_loc_with_text_dates():
A = tm.makeTimeSeries(10).iloc[:5]
B = tm.makeTimeSeries(10).iloc[5:]
s = dd.Series({('df', 0): A, ('df', 1): B}, 'df', None,
[A.index.min(), A.index.max(), B.index.max()])
assert s.loc['2000': '2010'].divisions == s.divisions
assert eq(s.loc['2000': '2010'], s)
assert len(s.loc['2000-01-03': '2000-01-05'].compute()) == 3
def test_loc_with_series():
assert eq(d.loc[d.a % 2 == 0], full.loc[full.a % 2 == 0])
assert sorted(d.loc[d.a % 2].dask) == sorted(d.loc[d.a % 2].dask)
assert sorted(d.loc[d.a % 2].dask) != sorted(d.loc[d.a % 3].dask)
def test_iloc_raises():
assert raises(NotImplementedError, lambda: d.iloc[:5])
def test_getitem():
df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'B': [9, 8, 7, 6, 5, 4, 3, 2, 1],
'C': [True, False, True] * 3},
columns=list('ABC'))
ddf = dd.from_pandas(df, 2)
assert eq(ddf['A'], df['A'])
assert eq(ddf[['A', 'B']], df[['A', 'B']])
assert eq(ddf[ddf.C], df[df.C])
assert eq(ddf[ddf.C.repartition([0, 2, 5, 8])], df[df.C])
assert raises(KeyError, lambda: df['X'])
assert raises(KeyError, lambda: df[['A', 'X']])
assert raises(AttributeError, lambda: df.X)
    # column labels that are not str/unicode
df = pd.DataFrame(np.random.randn(10, 5))
ddf = dd.from_pandas(df, 2)
assert eq(ddf[0], df[0])
assert eq(ddf[[1, 2]], df[[1, 2]])
assert raises(KeyError, lambda: df[8])
assert raises(KeyError, lambda: df[[1, 8]])
def test_assign():
assert eq(d.assign(c=d.a + 1, e=d.a + d.b),
full.assign(c=full.a + 1, e=full.a + full.b))
def test_map():
assert eq(d.a.map(lambda x: x + 1), full.a.map(lambda x: x + 1))
def test_concat():
x = _concat([pd.DataFrame(columns=['a', 'b']),
pd.DataFrame(columns=['a', 'b'])])
assert list(x.columns) == ['a', 'b']
assert len(x) == 0
def test_args():
e = d.assign(c=d.a + 1)
f = type(e)(*e._args)
assert eq(e, f)
assert eq(d.a, type(d.a)(*d.a._args))
assert eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))
def test_known_divisions():
assert d.known_divisions
df = dd.DataFrame({('x', 0): 'foo', ('x', 1): 'bar'}, 'x',
['a', 'b'], divisions=[None, None, None])
assert not df.known_divisions
df = dd.DataFrame({('x', 0): 'foo'}, 'x',
['a', 'b'], divisions=[0, 1])
    assert df.known_divisions
def test_unknown_divisions():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [None, None, None, None])
full = d.compute(get=dask.get)
assert eq(d.a.sum(), full.a.sum())
assert eq(d.a + d.b + 1, full.a + full.b + 1)
assert raises(ValueError, lambda: d.loc[3])
def test_concat2():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
a = dd.DataFrame(dsk, 'x', ['a', 'b'], [None, None])
dsk = {('y', 0): pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60]}),
('y', 1): pd.DataFrame({'a': [40, 50, 60], 'b': [30, 20, 10]}),
('y', 2): pd.DataFrame({'a': [70, 80, 90], 'b': [0, 0, 0]})}
b = dd.DataFrame(dsk, 'y', ['a', 'b'], [None, None])
dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60]}),
('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10]})}
c = dd.DataFrame(dsk, 'y', ['b', 'c'], [None, None])
dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60],
'd': [70, 80, 90]}),
('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10],
'd': [90, 80, 70]},
index=[3, 4, 5])}
d = dd.DataFrame(dsk, 'y', ['b', 'c', 'd'], [0, 3, 5])
cases = [[a, b], [a, c], [a, d]]
assert dd.concat([a]) is a
for case in cases:
result = dd.concat(case)
pdcase = [c.compute() for c in case]
assert result.npartitions == case[0].npartitions + case[1].npartitions
assert result.divisions == (None, ) * (result.npartitions + 1)
assert eq(pd.concat(pdcase), result)
assert result.dask == dd.concat(case).dask
result = dd.concat(case, join='inner')
assert result.npartitions == case[0].npartitions + case[1].npartitions
assert result.divisions == (None, ) * (result.npartitions + 1)
assert eq(pd.concat(pdcase, join='inner'), result)
assert result.dask == dd.concat(case, join='inner').dask
msg = ('Unable to concatenate DataFrame with unknown division '
'specifying axis=1')
with tm.assertRaisesRegexp(ValueError, msg):
dd.concat(case, axis=1)
def test_concat3():
pdf1 = pd.DataFrame(np.random.randn(6, 5),
columns=list('ABCDE'), index=list('abcdef'))
pdf2 = pd.DataFrame(np.random.randn(6, 5),
columns=list('ABCFG'), index=list('ghijkl'))
pdf3 = pd.DataFrame(np.random.randn(6, 5),
columns=list('ABCHI'), index=list('mnopqr'))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
result = dd.concat([ddf1, ddf2])
assert result.divisions == ddf1.divisions[:-1] + ddf2.divisions
assert result.npartitions == ddf1.npartitions + ddf2.npartitions
assert eq(result, pd.concat([pdf1, pdf2]))
assert eq(dd.concat([ddf1, ddf2], interleave_partitions=True),
pd.concat([pdf1, pdf2]))
result = dd.concat([ddf1, ddf2, ddf3])
assert result.divisions == (ddf1.divisions[:-1] + ddf2.divisions[:-1] +
ddf3.divisions)
assert result.npartitions == (ddf1.npartitions + ddf2.npartitions +
ddf3.npartitions)
assert eq(result, pd.concat([pdf1, pdf2, pdf3]))
assert eq(dd.concat([ddf1, ddf2, ddf3], interleave_partitions=True),
pd.concat([pdf1, pdf2, pdf3]))
def test_concat4_interleave_partitions():
pdf1 = pd.DataFrame(np.random.randn(10, 5),
columns=list('ABCDE'), index=list('abcdefghij'))
pdf2 = pd.DataFrame(np.random.randn(13, 5),
columns=list('ABCDE'), index=list('fghijklmnopqr'))
pdf3 = pd.DataFrame(np.random.randn(13, 6),
columns=list('CDEXYZ'), index=list('fghijklmnopqr'))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
msg = ('All inputs have known divisions which cannnot be '
'concatenated in order. Specify '
'interleave_partitions=True to ignore order')
cases = [[ddf1, ddf1], [ddf1, ddf2], [ddf1, ddf3], [ddf2, ddf1],
[ddf2, ddf3], [ddf3, ddf1], [ddf3, ddf2]]
for case in cases:
pdcase = [c.compute() for c in case]
with tm.assertRaisesRegexp(ValueError, msg):
dd.concat(case)
assert eq(dd.concat(case, interleave_partitions=True),
pd.concat(pdcase))
assert eq(dd.concat(case, join='inner', interleave_partitions=True),
pd.concat(pdcase, join='inner'))
msg = "'join' must be 'inner' or 'outer'"
with tm.assertRaisesRegexp(ValueError, msg):
dd.concat([ddf1, ddf1], join='invalid', interleave_partitions=True)
def test_concat5():
pdf1 = pd.DataFrame(np.random.randn(7, 5),
columns=list('ABCDE'), index=list('abcdefg'))
pdf2 = pd.DataFrame(np.random.randn(7, 6),
columns=list('FGHIJK'), index=list('abcdefg'))
pdf3 = pd.DataFrame(np.random.randn(7, 6),
columns=list('FGHIJK'), index=list('cdefghi'))
pdf4 = pd.DataFrame(np.random.randn(7, 5),
columns=list('FGHAB'), index=list('cdefghi'))
pdf5 = pd.DataFrame(np.random.randn(7, 5),
columns=list('FGHAB'), index=list('fklmnop'))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 3)
ddf3 = dd.from_pandas(pdf3, 2)
ddf4 = dd.from_pandas(pdf4, 2)
ddf5 = dd.from_pandas(pdf5, 3)
cases = [[ddf1, ddf2], [ddf1, ddf3], [ddf1, ddf4], [ddf1, ddf5],
[ddf3, ddf4], [ddf3, ddf5], [ddf5, ddf1, ddf4], [ddf5, ddf3],
[ddf1.A, ddf4.A], [ddf2.F, ddf3.F], [ddf4.A, ddf5.A],
[ddf1.A, ddf4.F], [ddf2.F, ddf3.H], [ddf4.A, ddf5.B],
[ddf1, ddf4.A], [ddf3.F, ddf2], [ddf5, ddf1.A, ddf2]]
for case in cases:
pdcase = [c.compute() for c in case]
assert eq(dd.concat(case, interleave_partitions=True),
pd.concat(pdcase))
assert eq(dd.concat(case, join='inner', interleave_partitions=True),
pd.concat(pdcase, join='inner'))
assert eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))
assert eq(dd.concat(case, axis=1, join='inner'),
pd.concat(pdcase, axis=1, join='inner'))
# Dask + pandas
cases = [[ddf1, pdf2], [ddf1, pdf3], [pdf1, ddf4],
[pdf1.A, ddf4.A], [ddf2.F, pdf3.F],
[ddf1, pdf4.A], [ddf3.F, pdf2], [ddf2, pdf1, ddf3.F]]
for case in cases:
pdcase = [c.compute() if isinstance(c, _Frame) else c for c in case]
assert eq(dd.concat(case, interleave_partitions=True),
pd.concat(pdcase))
assert eq(dd.concat(case, join='inner', interleave_partitions=True),
pd.concat(pdcase, join='inner'))
assert eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))
assert eq(dd.concat(case, axis=1, join='inner'),
pd.concat(pdcase, axis=1, join='inner'))
def test_append():
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
'b': [1, 2, 3, 4, 5, 6]})
df2 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
'b': [1, 2, 3, 4, 5, 6]},
index=[6, 7, 8, 9, 10, 11])
df3 = pd.DataFrame({'b': [1, 2, 3, 4, 5, 6],
'c': [1, 2, 3, 4, 5, 6]},
index=[6, 7, 8, 9, 10, 11])
ddf = dd.from_pandas(df, 2)
ddf2 = dd.from_pandas(df2, 2)
ddf3 = dd.from_pandas(df3, 2)
assert eq(ddf.append(ddf2), df.append(df2))
assert eq(ddf.a.append(ddf2.a), df.a.append(df2.a))
# different columns
assert eq(ddf.append(ddf3), df.append(df3))
assert eq(ddf.a.append(ddf3.b), df.a.append(df3.b))
# dask + pandas
assert eq(ddf.append(df2), df.append(df2))
assert eq(ddf.a.append(df2.a), df.a.append(df2.a))
assert eq(ddf.append(df3), df.append(df3))
assert eq(ddf.a.append(df3.b), df.a.append(df3.b))
s = pd.Series([7, 8], name=6, index=['a', 'b'])
assert eq(ddf.append(s), df.append(s))
df4 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
'b': [1, 2, 3, 4, 5, 6]},
index=[4, 5, 6, 7, 8, 9])
ddf4 = dd.from_pandas(df4, 2)
msg = ("Unable to append two dataframes to each other with known "
"divisions if those divisions are not ordered. "
"The divisions/index of the second dataframe must be "
"greater than the divisions/index of the first dataframe.")
with tm.assertRaisesRegexp(ValueError, msg):
ddf.append(ddf4)
def test_append2():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
ddf1 = dd.DataFrame(dsk, 'x', ['a', 'b'], [None, None])
dsk = {('y', 0): pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60]}),
('y', 1): pd.DataFrame({'a': [40, 50, 60], 'b': [30, 20, 10]}),
('y', 2): pd.DataFrame({'a': [70, 80, 90], 'b': [0, 0, 0]})}
ddf2 = dd.DataFrame(dsk, 'y', ['a', 'b'], [None, None])
dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60]}),
('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10]})}
ddf3 = dd.DataFrame(dsk, 'y', ['b', 'c'], [None, None])
assert eq(ddf1.append(ddf2), ddf1.compute().append(ddf2.compute()))
assert eq(ddf2.append(ddf1), ddf2.compute().append(ddf1.compute()))
# Series + DataFrame
assert eq(ddf1.a.append(ddf2), ddf1.a.compute().append(ddf2.compute()))
assert eq(ddf2.a.append(ddf1), ddf2.a.compute().append(ddf1.compute()))
# different columns
assert eq(ddf1.append(ddf3), ddf1.compute().append(ddf3.compute()))
assert eq(ddf3.append(ddf1), ddf3.compute().append(ddf1.compute()))
# Series + DataFrame
assert eq(ddf1.a.append(ddf3), ddf1.a.compute().append(ddf3.compute()))
assert eq(ddf3.b.append(ddf1), ddf3.b.compute().append(ddf1.compute()))
# Dask + pandas
assert eq(ddf1.append(ddf2.compute()), ddf1.compute().append(ddf2.compute()))
assert eq(ddf2.append(ddf1.compute()), ddf2.compute().append(ddf1.compute()))
# Series + DataFrame
assert eq(ddf1.a.append(ddf2.compute()), ddf1.a.compute().append(ddf2.compute()))
assert eq(ddf2.a.append(ddf1.compute()), ddf2.a.compute().append(ddf1.compute()))
# different columns
assert eq(ddf1.append(ddf3.compute()), ddf1.compute().append(ddf3.compute()))
assert eq(ddf3.append(ddf1.compute()), ddf3.compute().append(ddf1.compute()))
# Series + DataFrame
assert eq(ddf1.a.append(ddf3.compute()), ddf1.a.compute().append(ddf3.compute()))
assert eq(ddf3.b.append(ddf1.compute()), ddf3.b.compute().append(ddf1.compute()))
def test_dataframe_series_are_dillable():
try:
import dill
except ImportError:
return
e = d.groupby(d.a).b.sum()
f = dill.loads(dill.dumps(e))
assert eq(e, f)
def test_dataframe_series_are_pickleable():
try:
import cloudpickle
import pickle
except ImportError:
return
dumps = cloudpickle.dumps
loads = pickle.loads
e = d.groupby(d.a).b.sum()
f = loads(dumps(e))
assert eq(e, f)
def test_random_partitions():
a, b = d.random_split([0.5, 0.5])
assert isinstance(a, dd.DataFrame)
assert isinstance(b, dd.DataFrame)
assert len(a.compute()) + len(b.compute()) == len(full)
def test_series_nunique():
ps = pd.Series(list('aaabbccccdddeee'), name='a')
s = dd.from_pandas(ps, npartitions=3)
assert eq(s.nunique(), ps.nunique())
def test_dataframe_groupby_nunique():
strings = list('aaabbccccdddeee')
data = np.random.randn(len(strings))
ps = pd.DataFrame(dict(strings=strings, data=data))
s = dd.from_pandas(ps, npartitions=3)
expected = ps.groupby('strings')['data'].nunique()
assert eq(s.groupby('strings')['data'].nunique(), expected)
def test_dataframe_groupby_nunique_across_group_same_value():
strings = list('aaabbccccdddeee')
data = list(map(int, '123111223323412'))
ps = pd.DataFrame(dict(strings=strings, data=data))
s = dd.from_pandas(ps, npartitions=3)
expected = ps.groupby('strings')['data'].nunique()
assert eq(s.groupby('strings')['data'].nunique(), expected)
@pytest.mark.parametrize(['npartitions', 'freq', 'closed', 'label'],
list(product([2, 5], ['30T', 'h', 'd', 'w', 'M'],
['right', 'left'], ['right', 'left'])))
def test_series_resample(npartitions, freq, closed, label):
index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h'))
df = pd.Series(range(len(index)), index=index)
ds = dd.from_pandas(df, npartitions=npartitions)
# Series output
result = ds.resample(freq, how='mean', closed=closed, label=label).compute()
expected = df.resample(freq, how='mean', closed=closed, label=label)
tm.assert_series_equal(result, expected, check_dtype=False)
# Frame output
resampled = ds.resample(freq, how='ohlc', closed=closed, label=label)
divisions = resampled.divisions
result = resampled.compute()
expected = df.resample(freq, how='ohlc', closed=closed, label=label)
tm.assert_frame_equal(result, expected, check_dtype=False)
assert expected.index[0] == divisions[0]
assert expected.index[-1] == divisions[-1]
def test_series_resample_not_implemented():
index = pd.date_range(start='20120102', periods=100, freq='T')
s = pd.Series(range(len(index)), index=index)
ds = dd.from_pandas(s, npartitions=5)
# Frequency doesn't evenly divide day
assert raises(NotImplementedError, lambda: ds.resample('57T'))
# Kwargs not implemented
kwargs = {'fill_method': 'bfill', 'limit': 2, 'loffset': 2, 'base': 2,
'convention': 'end', 'kind': 'period'}
for k, v in kwargs.items():
assert raises(NotImplementedError, lambda: ds.resample('6h', **{k: v}))
def test_set_partition_2():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')})
ddf = dd.from_pandas(df, 2)
result = ddf.set_partition('y', ['a', 'c', 'd'])
assert result.divisions == ('a', 'c', 'd')
assert list(result.compute(get=get_sync).index[-2:]) == ['d', 'd']
def test_repartition():
def _check_split_data(orig, d):
"""Check data is split properly"""
keys = [k for k in d.dask if k[0].startswith('repartition-split')]
keys = sorted(keys)
sp = pd.concat([d._get(d.dask, k) for k in keys])
assert eq(orig, sp)
assert eq(orig, d)
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.repartition(divisions=[10, 20, 50, 60])
assert b.divisions == (10, 20, 50, 60)
assert eq(a, b)
assert eq(a._get(b.dask, (b._name, 0)), df.iloc[:1])
for div in [[20, 60], [10, 50], [1], # first / last element mismatch
[0, 60], [10, 70], # do not allow to expand divisions by default
[10, 50, 20, 60], # not sorted
[10, 10, 20, 60]]: # not unique (last element can be duplicated)
assert raises(ValueError, lambda: a.repartition(divisions=div))
pdf = pd.DataFrame(np.random.randn(7, 5), columns=list('abxyz'))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert eq(ddf, pdf)
for div in [[0, 6], [0, 6, 6], [0, 5, 6], [0, 4, 6, 6],
[0, 2, 6], [0, 2, 6, 6],
[0, 2, 3, 6, 6], [0, 1, 2, 3, 4, 5, 6, 6]]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert eq(pdf.x, rds)
# expand divisions
for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert eq(pdf.x, rds)
pdf = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
'y': [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},
index=list('abcdefghij'))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert eq(ddf, pdf)
for div in [list('aj'), list('ajj'), list('adj'),
list('abfj'), list('ahjj'), list('acdj'), list('adfij'),
list('abdefgij'), list('abcdefghij')]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert eq(pdf.x, rds)
# expand divisions
for div in [list('Yadijm'), list('acmrxz'), list('Yajz')]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert eq(pdf.x, rds)
def test_repartition_divisions():
result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
assert result == {('b', 0): (_loc, ('a', 0), 1, 3, False),
('b', 1): (_loc, ('a', 1), 3, 4, False),
('b', 2): (_loc, ('a', 1), 4, 6, False),
('b', 3): (_loc, ('a', 1), 6, 7, True),
('c', 0): (pd.concat, (list, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
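    # Illustrative reading of the expected graph above (comment added here,
    # not part of the original test): the 'b' tasks slice the old 'a'
    # partitions at the new division points (1-3, 3-4, 4-6, 6-7), and the 'c'
    # tasks either concatenate adjacent slices (('c', 0)) or simply alias a
    # single slice (('c', 1) and ('c', 2)).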
def test_repartition_on_pandas_dataframe():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
ddf = dd.repartition(df, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.DataFrame)
assert ddf.divisions == (10, 20, 50, 60)
assert eq(ddf, df)
ddf = dd.repartition(df.y, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.Series)
assert ddf.divisions == (10, 20, 50, 60)
assert eq(ddf, df.y)
def test_embarrassingly_parallel_operations():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
assert eq(a.x.astype('float32'), df.x.astype('float32'))
assert a.x.astype('float32').compute().dtype == 'float32'
assert eq(a.x.dropna(), df.x.dropna())
assert eq(a.x.fillna(100), df.x.fillna(100))
assert eq(a.fillna(100), df.fillna(100))
assert eq(a.x.between(2, 4), df.x.between(2, 4))
assert eq(a.x.clip(2, 4), df.x.clip(2, 4))
assert eq(a.x.notnull(), df.x.notnull())
assert len(a.sample(0.5).compute()) < len(df)
def test_sample():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.sample(0.5)
assert eq(b, b)
c = a.sample(0.5, random_state=1234)
d = a.sample(0.5, random_state=1234)
assert eq(c, d)
assert a.sample(0.5)._name != a.sample(0.5)._name
def test_datetime_accessor():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
df['x'] = df.x.astype('M8[us]')
a = dd.from_pandas(df, 2)
assert 'date' in dir(a.x.dt)
# pandas loses Series.name via datetime accessor
# see https://github.com/pydata/pandas/issues/10712
assert eq(a.x.dt.date, df.x.dt.date, check_names=False)
assert (a.x.dt.to_pydatetime().compute() == df.x.dt.to_pydatetime()).all()
assert a.x.dt.date.dask == a.x.dt.date.dask
assert a.x.dt.to_pydatetime().dask == a.x.dt.to_pydatetime().dask
def test_str_accessor():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'D']})
a = dd.from_pandas(df, 2)
assert 'upper' in dir(a.x.str)
assert eq(a.x.str.upper(), df.x.str.upper())
assert a.x.str.upper().dask == a.x.str.upper().dask
def test_empty_max():
df = pd.DataFrame({'x': [1, 2, 3]})
a = dd.DataFrame({('x', 0): pd.DataFrame({'x': [1]}),
('x', 1): pd.DataFrame({'x': []})}, 'x',
['x'], [None, None, None])
assert eq(a.x.max(), 1)
def test_loc_on_numpy_datetimes():
df = pd.DataFrame({'x': [1, 2, 3]},
index=list(map(np.datetime64, ['2014', '2015', '2016'])))
a = dd.from_pandas(df, 2)
a.divisions = list(map(np.datetime64, a.divisions))
assert eq(a.loc['2014': '2015'], a.loc['2014': '2015'])
def test_loc_on_pandas_datetimes():
df = pd.DataFrame({'x': [1, 2, 3]},
index=list(map(pd.Timestamp, ['2014', '2015', '2016'])))
a = dd.from_pandas(df, 2)
a.divisions = list(map(pd.Timestamp, a.divisions))
assert eq(a.loc['2014': '2015'], a.loc['2014': '2015'])
def test_coerce_loc_index():
for t in [pd.Timestamp, np.datetime64]:
assert isinstance(_coerce_loc_index([t('2014')], '2014'), t)
def test_nlargest_series():
s = pd.Series([1, 3, 5, 2, 4, 6])
ss = dd.from_pandas(s, npartitions=2)
assert eq(ss.nlargest(2), s.nlargest(2))
def test_categorical_set_index():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': ['a', 'b', 'b', 'c']})
df['y'] = df.y.astype('category')
a = dd.from_pandas(df, npartitions=2)
with dask.set_options(get=get_sync):
b = a.set_index('y')
df2 = df.set_index('y')
        assert list(b.index.compute()) == list(df2.index)
b = a.set_index(a.y)
df2 = df.set_index(df.y)
        assert list(b.index.compute()) == list(df2.index)
def test_query():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
q = a.query('x**2 > y')
with ignoring(ImportError):
assert eq(q, df.query('x**2 > y'))
def test_deterministic_arithmetic_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert sorted((a.x + a.y ** 2).dask) == sorted((a.x + a.y ** 2).dask)
assert sorted((a.x + a.y ** 2).dask) != sorted((a.x + a.y ** 3).dask)
assert sorted((a.x + a.y ** 2).dask) != sorted((a.x - a.y ** 2).dask)
def test_deterministic_reduction_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert a.x.sum()._name == a.x.sum()._name
assert a.x.mean()._name == a.x.mean()._name
assert a.x.var()._name == a.x.var()._name
assert a.x.min()._name == a.x.min()._name
assert a.x.max()._name == a.x.max()._name
assert a.x.count()._name == a.x.count()._name
# Test reduction without token string
assert sorted(reduction(a.x, len, np.sum).dask) !=\
sorted(reduction(a.x, np.sum, np.sum).dask)
assert sorted(reduction(a.x, len, np.sum).dask) ==\
sorted(reduction(a.x, len, np.sum).dask)
def test_deterministic_apply_concat_apply_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)
assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)
assert sorted(a.x.drop_duplicates().dask) == \
sorted(a.x.drop_duplicates().dask)
assert sorted(a.groupby('x').y.mean().dask) == \
sorted(a.groupby('x').y.mean().dask)
# Test aca without passing in token string
f = lambda a: a.nlargest(5)
f2 = lambda a: a.nlargest(3)
assert sorted(aca(a.x, f, f, a.x.name).dask) !=\
sorted(aca(a.x, f2, f2, a.x.name).dask)
assert sorted(aca(a.x, f, f, a.x.name).dask) ==\
sorted(aca(a.x, f, f, a.x.name).dask)
def test_gh_517():
arr = np.random.randn(100, 2)
df = pd.DataFrame(arr, columns=['a', 'b'])
ddf = dd.from_pandas(df, 2)
assert ddf.index.nunique().compute() == 100
ddf2 = dd.from_pandas(pd.concat([df, df]), 5)
assert ddf2.index.nunique().compute() == 100
def test_drop_axis_1():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert eq(a.drop('y', axis=1), df.drop('y', axis=1))
def test_gh580():
df = pd.DataFrame({'x': np.arange(10, dtype=float)})
ddf = dd.from_pandas(df, 2)
assert eq(np.cos(df['x']), np.cos(ddf['x']))
assert eq(np.cos(df['x']), np.cos(ddf['x']))
def test_rename_dict():
renamer = {'a': 'A', 'b': 'B'}
assert eq(d.rename(columns=renamer),
full.rename(columns=renamer))
def test_rename_function():
renamer = lambda x: x.upper()
assert eq(d.rename(columns=renamer),
full.rename(columns=renamer))
def test_rename_index():
renamer = {0: 1}
assert raises(ValueError, lambda: d.rename(index=renamer))
def test_to_frame():
s = pd.Series([1, 2, 3], name='foo')
a = dd.from_pandas(s, npartitions=2)
assert eq(s.to_frame(), a.to_frame())
assert eq(s.to_frame('bar'), a.to_frame('bar'))
def test_series_groupby_propagates_names():
df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
ddf = dd.from_pandas(df, 2)
func = lambda df: df['y'].sum()
result = ddf.groupby('x').apply(func, columns='y')
expected = df.groupby('x').apply(func)
expected.name = 'y'
tm.assert_series_equal(result.compute(), expected)
def test_series_groupby():
s = pd.Series([1, 2, 2, 1, 1])
pd_group = s.groupby(s)
ss = dd.from_pandas(s, npartitions=2)
dask_group = ss.groupby(ss)
pd_group2 = s.groupby(s + 1)
dask_group2 = ss.groupby(ss + 1)
    for dg, pdg in [(dask_group, pd_group), (dask_group2, pd_group2)]:
assert eq(dg.count(), pdg.count())
assert eq(dg.sum(), pdg.sum())
assert eq(dg.min(), pdg.min())
assert eq(dg.max(), pdg.max())
assert raises(TypeError, lambda: ss.groupby([1, 2]))
sss = dd.from_pandas(s, npartitions=3)
assert raises(NotImplementedError, lambda: ss.groupby(sss))
def test_apply():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
a = dd.from_pandas(df, npartitions=2)
func = lambda row: row['x'] + row['y']
eq(a.x.apply(lambda x: x + 1), df.x.apply(lambda x: x + 1))
eq(a.apply(lambda xy: xy[0] + xy[1], axis=1, columns=None),
df.apply(lambda xy: xy[0] + xy[1], axis=1))
assert raises(NotImplementedError, lambda: a.apply(lambda xy: xy, axis=0))
assert raises(ValueError, lambda: a.apply(lambda xy: xy, axis=1))
func = lambda x: pd.Series([x, x])
eq(a.x.apply(func, name=[0, 1]), df.x.apply(func))
def test_index_time_properties():
i = tm.makeTimeSeries()
a = dd.from_pandas(i, npartitions=3)
assert (i.index.day == a.index.day.compute()).all()
assert (i.index.month == a.index.month.compute()).all()
@pytest.mark.skipif(LooseVersion(pd.__version__) <= '0.16.2',
reason="nlargest not in pandas pre 0.16.2")
def test_nlargest():
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
ddf = dd.from_pandas(df, npartitions=2)
res = ddf.nlargest(5, 'a')
exp = df.nlargest(5, 'a')
eq(res, exp)
@pytest.mark.skipif(LooseVersion(pd.__version__) <= '0.16.2',
reason="nlargest not in pandas pre 0.16.2")
def test_nlargest_multiple_columns():
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
ddf = dd.from_pandas(df, npartitions=2)
result = ddf.nlargest(5, ['a', 'b'])
expected = df.nlargest(5, ['a', 'b'])
eq(result, expected)
def test_groupby_index_array():
df = tm.makeTimeDataFrame()
ddf = dd.from_pandas(df, npartitions=2)
eq(df.A.groupby(df.index.month).nunique(),
ddf.A.groupby(ddf.index.month).nunique(), check_names=False)
def test_groupby_set_index():
df = tm.makeTimeDataFrame()
ddf = dd.from_pandas(df, npartitions=2)
assert raises(NotImplementedError,
lambda: ddf.groupby(df.index.month, as_index=False))
def test_reset_index():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
res = ddf.reset_index()
exp = df.reset_index()
assert len(res.index.compute()) == len(exp.index)
assert res.columns == tuple(exp.columns)
assert_array_almost_equal(res.compute().values, exp.values)
def test_dataframe_compute_forward_kwargs():
x = dd.from_pandas(pd.DataFrame({'a': range(10)}), npartitions=2).a.sum()
x.compute(bogus_keyword=10)
def test_series_iteritems():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df['x'].iteritems(), ddf['x'].iteritems()):
assert a == b
def test_dataframe_iterrows():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.iterrows(), ddf.iterrows()):
tm.assert_series_equal(a[1], b[1])
def test_dataframe_itertuples():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(), ddf.itertuples()):
assert a == b
| bsd-3-clause |
johannesmik/neurons | MISC/plots/eta.py | 2 | 1132 | """
Show plots of the eta function for different values of eta_0 and tau_m.
"""
import numpy as np
import matplotlib.pyplot as plt
def eta(s, eta_reset, t_membran):
    ret = -eta_reset * np.exp(-s / t_membran)
ret[s < 0] = 0
ret[s == 0] = 0.9 # Spike
return ret
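# Illustrative sanity check (not part of the original script): with
# eta_reset=0.5 and t_membran=10, eta(np.array([-1., 0., 10.]), 0.5, 10)
# should give roughly [0, 0.9, -0.18] -- zero before the spike, the fixed
# spike marker at s == 0, and the exponential decay -0.5 * exp(-1) afterwards.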
def plot_eta(ax, eta_reset, t_membran):
x = np.linspace(-100, 500, num=601)
if t_membran != 0:
labelstr = r'$\eta_0 = %.1f, \tau_m = %.0f$' % (eta_reset, t_membran)
ax.plot(x, eta(x, eta_reset, t_membran), label=labelstr)
ax.legend(prop={'size':12})
ax.set_xlabel('time in ms')
ax.set_ylabel('current in mV')
ax.set_ylim([-1, 1])
ax.set_xlim([-10, 200])
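# Note (added comment, not in the original): plot_eta only draws the curve
# when t_membran != 0; the __main__ block below sweeps eta_0 over {0.3, 0.7}
# and tau_m over {10, 30} ms and plots one subplot per combination.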
if __name__ == "__main__":
eta_resets = [0.3, 0.7]
t_membranes = [10, 30]
for i, eta_reset in enumerate(eta_resets):
for j, t_membrane in enumerate(t_membranes):
ax = plt.subplot2grid((len(eta_resets), len(t_membranes)), (i, j))
plot_eta(ax, eta_reset, t_membrane)
plt.suptitle(r'The $\eta(s)$ function for different values of $\eta_0$ and $\tau_m$', fontsize=16)
plt.show()
| bsd-2-clause |
leesavide/pythonista-docs | Documentation/matplotlib/examples/misc/contour_manual.py | 12 | 1630 | """
Example of displaying your own contour lines and polygons using ContourSet.
"""
import matplotlib.pyplot as plt
from matplotlib.contour import ContourSet
import matplotlib.cm as cm
# Contour lines for each level are a list/tuple of polygons.
lines0 = [ [[0,0],[0,4]] ]
lines1 = [ [[2,0],[1,2],[1,3]] ]
lines2 = [ [[3,0],[3,2]], [[3,3],[3,4]] ] # Note two lines.
# Filled contours between two levels are also a list/tuple of polygons.
# Points can be ordered clockwise or anticlockwise.
filled01 = [ [[0,0],[0,4],[1,3],[1,2],[2,0]] ]
filled12 = [ [[2,0],[3,0],[3,2],[1,3],[1,2]], # Note two polygons.
[[1,4],[3,4],[3,3]] ]
plt.figure()
# Filled contours using filled=True.
cs = ContourSet(plt.gca(), [0,1,2], [filled01, filled12], filled=True, cmap=cm.bone)
cbar = plt.colorbar(cs)
# Contour lines (non-filled).
lines = ContourSet(plt.gca(), [0,1,2], [lines0, lines1, lines2], cmap=cm.cool,
linewidths=3)
cbar.add_lines(lines)
plt.axis([-0.5, 3.5, -0.5, 4.5])
plt.title('User-specified contours')
# Multiple filled contour lines can be specified in a single list of polygon
# vertices along with a list of vertex kinds (code types) as described in the
# Path class. This is particularly useful for polygons with holes.
# Here a code type of 1 is a MOVETO, and 2 is a LINETO.
plt.figure()
filled01 = [ [[0,0],[3,0],[3,3],[0,3],[1,1],[1,2],[2,2],[2,1]] ]
kinds01 = [ [1,2,2,2,1,2,2,2] ]
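# Illustrative note (not in the original example): in filled01 above, the
# first MOVETO (code 1) plus three LINETOs traces the outer 3x3 square, and
# the second MOVETO plus three LINETOs traces the inner unit square, which
# Path renders as a hole in the filled region.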
cs = ContourSet(plt.gca(), [0,1], [filled01], [kinds01], filled=True)
cbar = plt.colorbar(cs)
plt.axis([-0.5, 3.5, -0.5, 3.5])
plt.title('User specified filled contours with holes')
plt.show() | apache-2.0 |
iismd17/scikit-learn | sklearn/utils/estimator_checks.py | 21 | 51976 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
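Examples
--------
A minimal usage sketch (``LinearSVC`` is used purely for illustration;
any scikit-learn compatible estimator class can be passed)::

    from sklearn.svm import LinearSVC
    check_estimator(LinearSVC)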
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
# check fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
# check fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
# check fitting a 1d X array with a single-sample y
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check that an error is raised if the number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# the one-vs-one strategy of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# check that if regressors expose decision_function or predict_proba,
# calling these raises a DeprecationWarning
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset; the default n_iter is unlikely to be
# enough for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we compare the hash of each
# parameter value before and after fitting.
# The only exception to this rule of immutable constructor parameters
# would be a RandomState instance, but in this check we explicitly
# fix the random_state params to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from __init__ and then
# compare these against the actual values of the attributes.
# getattr with 'deprecated_original' strips a possible deprecation decorator.
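# A compliant __init__ simply stores its arguments unchanged, e.g. a
# sketch (hypothetical estimator, shown for illustration only):
#
#     class MyEstimator(BaseEstimator):
#         def __init__(self, alpha=1.0):
#             self.alpha = alpha  # no validation or conversion at init time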
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
# Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert y into 2-D for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
# Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
sanketloke/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
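
A minimal sketch of this step (``X`` here is a stand-in for the
standardized matrix of daily variations that the script builds below)::

    import numpy as np
    from sklearn import covariance

    rng = np.random.RandomState(0)
    X = rng.normal(size=(250, 20))  # e.g. 250 trading days, 20 symbols
    edge_model = covariance.GraphLassoCV()
    edge_model.fit(X)
    partial_correlations = edge_model.precision_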
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does not
enforce equal-size clusters and can automatically choose the number of
clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
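
A sketch of this step, with the same parameters as used further below
(``X`` as in the first sketch above)::

    from sklearn import manifold

    node_position_model = manifold.LocallyLinearEmbedding(
        n_components=2, eigen_solver='dense', n_neighbors=6)
    embedding = node_position_model.fit_transform(X.T).T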
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the
challenges is to position the labels while minimizing overlap. For this
we use a heuristic based on the direction of the nearest neighbor along
each axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# a different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
dbrowneup/PacificBlue | src/PacificBlue.py | 1 | 3661 | #!/usr/bin/env python
#PacificBlue Genome Scaffolding Tool for PacBio Long Reads
#
#Written by Dan Browne
#
#Devarenne Lab
#Department of Biochemistry & Biophysics
#Texas A&M University
#College Station, TX
#
#Contact: dbrowne.up@gmail.com
#Load modules
import sys
import argparse
from datetime import datetime
from multiprocessing import Pool
from math import ceil
import matplotlib
import matplotlib.pyplot as plt
from PacbioMapping import PacbioMapping
from PacbioSubgraph import PacbioSubgraph
from ScaffoldGraph import ScaffoldGraph
from PathFinder import PathFinder
#Parse command-line arguments
parser = argparse.ArgumentParser(description="PacificBlue Genome Scaffolder")
parser.add_argument('--blasr-alignments', help="BLASR alignment file", dest='blasr_alignments', metavar='FILE')
parser.add_argument('--blasr-format', help="Format of BLASR alignments", dest='blasr_format', default="m1", choices=['m1', 'm4', 'm5'])
parser.add_argument('--fasta', help="Fasta sequences to scaffold", dest='fasta_file', metavar='FILE')
parser.add_argument('--threads', help="Number of CPUs to use", dest='num_threads', metavar='NN', type=int, default=1)
parser.add_argument('--output', help="Name of output file", dest='output_file', metavar='FILE')
parser.add_argument('--cov_cutoff', help="PacbioSubgraph coverage cutoff (default=3)", dest='cov_cutoff', metavar='INT', type=int, default=3)
parser.add_argument('--fraction', help="PacbioSubgraph repeat fraction (default=0.5)", dest='fraction', metavar='(0,1]', type=float, default=0.5)
parser.add_argument('--edge_cutoff', help="ScaffoldGraph edge cutoff (default=0.25)", dest='edge_cutoff', metavar='(0,1]', type=float, default=0.25)
parser.add_argument('--edge_weight', help="ScaffoldGraph edge weight requirement (default=1)", dest='edge_weight', metavar='INT', type=int, default=1)
args = parser.parse_args()
#Worker function for parallel processing of read alignments
def parallel_subgraph(item):
read, mapping = item
sg = PacbioSubgraph(read, mapping, cov_cutoff=args.cov_cutoff, fraction=args.fraction)
if len(sg.Connects) == 0:
del sg
return
n = len(sg.Connects)
report = []
for a1, a2, d in sg.Connects:
v1 = -1 * a1.tName if a1.tStrand == 1 else a1.tName
v2 = -1 * a2.tName if a2.tStrand == 1 else a2.tName
report.append((v1, v2, d, n))
del sg
return report
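#Editorial note (hedged): each entry in the returned report appears to be a
#tuple (v1, v2, d, n), where v1/v2 are contig identifiers signed by alignment
#strand, d is the separation reported by the subgraph, and n is the number of
#connections found for the read; this reading is inferred from the code above.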
#Main function to run PacificBlue pipeline
def main():
#Load BLASR alignments into PacbioMapping object
mapping = PacbioMapping(args.blasr_alignments, fileFormat=args.blasr_format)
#Process read alignments in parallel
print "Entering parallel PacbioSubgraph module:", str(datetime.now())
print "Number of threads to use:", args.num_threads
sg_pool = Pool(processes=args.num_threads, maxtasksperchild=100)
data = mapping.readToContig.iteritems()
connection_lists = []
for x in sg_pool.imap_unordered(parallel_subgraph, data, chunksize=100):
if x is not None:
connection_lists.append(x)
sg_pool.close()
sg_pool.join()
print "Leaving parallel PacbioSubgraph module:", str(datetime.now())
#Load reported connections in ScaffoldGraph object
scaff = ScaffoldGraph(args.fasta_file, connection_lists, edge_cutoff=args.edge_cutoff, edge_weight=args.edge_weight)
#Build scaffold sequences with PathFinder object
paths = PathFinder(scaff.G)
#Write output to Fasta file
out = open(args.output_file, "w")
for i, seq in enumerate(paths.scaffolds):
out.write(">scaffold_"+str(i+1)+"\n")
out.write(seq+"\n")
out.close()
if __name__== '__main__':
main()
| gpl-3.0 |
evidation-health/pymc3 | pymc3/tests/test_plots.py | 13 | 1721 | import matplotlib
matplotlib.use('Agg', warn=False)
import numpy as np
from .checks import close_to
import pymc3.plots
from pymc3.plots import *
from pymc3 import Slice, Metropolis, find_hessian, sample
def test_plots():
# Test single trace
from pymc3.examples import arbitrary_stochastic as asmod
with asmod.model as model:
start = model.test_point
h = find_hessian(start)
step = Metropolis(model.vars, h)
trace = sample(3000, step, start)
traceplot(trace)
forestplot(trace)
autocorrplot(trace)
def test_plots_multidimensional():
    # Test multidimensional model trace

from .models import multidimensional_model
start, model, _ = multidimensional_model()
with model as model:
h = np.diag(find_hessian(start))
step = Metropolis(model.vars, h)
trace = sample(3000, step, start)
traceplot(trace)
#forestplot(trace)
#autocorrplot(trace)
def test_multichain_plots():
from pymc3.examples import disaster_model as dm
with dm.model as model:
# Run sampler
step1 = Slice([dm.early_mean, dm.late_mean])
step2 = Metropolis([dm.switchpoint])
start = {'early_mean': 2., 'late_mean': 3., 'switchpoint': 50}
ptrace = sample(1000, [step1, step2], start, njobs=2)
forestplot(ptrace, vars=['early_mean', 'late_mean'])
autocorrplot(ptrace, vars=['switchpoint'])
def test_make_2d():
a = np.arange(4)
close_to(pymc3.plots.make_2d(a), a[:,None], 0)
n = 7
a = np.arange(n*4*5).reshape((n,4,5))
res = pymc3.plots.make_2d(a)
assert res.shape == (n,20)
close_to(a[:,0,0], res[:,0], 0)
close_to(a[:,3,2], res[:,2*4+3], 0)
| apache-2.0 |
simpeg/discretize | discretize/utils/code_utils.py | 1 | 7209 | import numpy as np
import warnings
SCALARTYPES = (complex, float, int, np.number)
def is_scalar(f):
"""Determine if the input argument is a scalar.
The function **is_scalar** returns *True* if the input is an integer,
float or complex number. The function returns *False* otherwise.
Parameters
----------
f :
Any input quantity
Returns
-------
bool :
- *True* if the input argument is an integer, float or complex number
- *False* otherwise
"""
if isinstance(f, SCALARTYPES):
return True
elif isinstance(f, np.ndarray) and f.size == 1 and isinstance(f[0], SCALARTYPES):
return True
return False
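# Illustrative behaviour (editorial sketch, not part of the original module):
#     is_scalar(3.0)                  -> True
#     is_scalar(np.array([3.0]))      -> True   (size-1 array of a numeric type)
#     is_scalar(np.array([1.0, 2.0])) -> False  (more than one element)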
def as_array_n_by_dim(pts, dim):
"""Verifies the dimensions of a 2D array.
The function **as_array_n_by_dim** will examine the :class:`numpy.array_like`
*pts* and determine if the number of columns is equal to *dim*.
If so, this function returns the input argument *pts*. Otherwise,
    the function raises an error.
Parameters
----------
pts : numpy.array_like
A 2D numpy array
dim : int
The number of columns which *pts* should have
Returns
-------
numpy.array_like
Returns the input argument *pts* if the number of columns equals *dim*.
"""
if type(pts) == list:
pts = np.array(pts)
if not isinstance(pts, np.ndarray):
raise TypeError("pts must be a numpy array")
if dim > 1:
pts = np.atleast_2d(pts)
elif len(pts.shape) == 1:
pts = pts[:, np.newaxis]
if pts.shape[1] != dim:
raise ValueError(
"pts must be a column vector of shape (nPts, {0:d}) not ({1:d}, {2:d})".format(
*((dim,) + pts.shape)
)
)
return pts
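# Illustrative behaviour (editorial sketch): as_array_n_by_dim([[0, 0], [1, 1],
# [2, 2]], dim=2) returns a (3, 2) ndarray with the same values, while a 1D
# input checked with dim=1 is reshaped into a column vector of shape (nPts, 1).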
def requires(modules):
"""Decorator to wrap functions with soft dependencies.
This function was inspired by the `requires` function of pysal,
which is released under the 'BSD 3-Clause "New" or "Revised" License'.
https://github.com/pysal/pysal/blob/master/pysal/lib/common.py
Parameters
----------
modules : dict
Dictionary containing soft dependencies, e.g.,
{'matplotlib': matplotlib}.
Returns
-------
decorated_function : function
Original function if all soft dependencies are met, otherwise
it returns an empty function which prints why it is not running.
"""
# Check the required modules, add missing ones in the list `missing`.
missing = []
for key, item in modules.items():
if item is False:
missing.append(key)
def decorated_function(function):
"""Wrap function."""
if not missing:
return function
else:
def passer(*args, **kwargs):
print(("Missing dependencies: {d}.".format(d=missing)))
print(("Not running `{}`.".format(function.__name__)))
return passer
return decorated_function
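# Editorial usage sketch (hedged): `requires` is typically applied to a helper
# guarded by an optional import; the `plot_grid` name below is purely
# illustrative and not part of this module.
#
#     try:
#         import matplotlib
#     except ImportError:
#         matplotlib = False
#
#     @requires({"matplotlib": matplotlib})
#     def plot_grid(mesh):
#         mesh.plot_grid()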
def deprecate_class(removal_version=None, new_location=None):
def decorator(cls):
my_name = cls.__name__
parent_name = cls.__bases__[0].__name__
message = f"{my_name} has been deprecated, please use {parent_name}."
if removal_version is not None:
message += (
f" It will be removed in version {removal_version} of discretize."
)
else:
message += " It will be removed in a future version of discretize."
# stash the original initialization of the class
cls._old__init__ = cls.__init__
def __init__(self, *args, **kwargs):
warnings.warn(message, DeprecationWarning)
self._old__init__(*args, **kwargs)
cls.__init__ = __init__
if new_location is not None:
parent_name = f"{new_location}.{parent_name}"
cls.__doc__ = f""" This class has been deprecated, see `{parent_name}` for documentation"""
return cls
return decorator
def deprecate_module(old_name, new_name, removal_version=None):
message = f"The {old_name} module has been deprecated, please use {new_name}."
if removal_version is not None:
message += f" It will be removed in version {removal_version} of discretize"
else:
message += " It will be removed in a future version of discretize."
message += " Please update your code accordingly."
warnings.warn(message, DeprecationWarning)
def deprecate_property(new_name, old_name, removal_version=None):
if removal_version is not None:
tag = f" It will be removed in version {removal_version} of discretize."
else:
tag = " It will be removed in a future version of discretize."
def get_dep(self):
class_name = type(self).__name__
message = (
f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
+ tag
)
warnings.warn(message, DeprecationWarning)
return getattr(self, new_name)
def set_dep(self, other):
class_name = type(self).__name__
message = (
f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
+ tag
)
warnings.warn(message, DeprecationWarning)
setattr(self, new_name, other)
doc = f"`{old_name}` has been deprecated. See `{new_name}` for documentation"
return property(get_dep, set_dep, None, doc)
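# Editorial usage sketch (hedged): inside a class body one might write
#     nC = deprecate_property("n_cells", "nC", removal_version="1.0.0")
# so that reading or assigning `nC` emits a DeprecationWarning and forwards to
# `n_cells`; the attribute names here are illustrative only.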
def deprecate_method(new_name, old_name, removal_version=None):
if removal_version is not None:
tag = f" It will be removed in version {removal_version} of discretize."
else:
tag = " It will be removed in a future version of discretize."
def new_method(self, *args, **kwargs):
class_name = type(self).__name__
warnings.warn(
f"{class_name}.{old_name} has been deprecated, please use {class_name}.{new_name}."
+ tag,
DeprecationWarning,
)
return getattr(self, new_name)(*args, **kwargs)
doc = f"""
`{old_name}` has been deprecated. See `{new_name}` for documentation
See Also
--------
{new_name}
"""
new_method.__doc__ = doc
return new_method
def deprecate_function(new_function, old_name, removal_version=None):
new_name = new_function.__name__
if removal_version is not None:
tag = f" It will be removed in version {removal_version} of discretize."
else:
tag = " It will be removed in a future version of discretize."
def dep_function(*args, **kwargs):
warnings.warn(
f"{old_name} has been deprecated, please use {new_name}." + tag,
DeprecationWarning,
)
return new_function(*args, **kwargs)
doc = f"""
`{old_name}` has been deprecated. See `{new_name}` for documentation
See Also
--------
{new_name}
"""
dep_function.__doc__ = doc
return dep_function
# DEPRECATIONS
isScalar = deprecate_function(is_scalar, "isScalar", removal_version="1.0.0")
asArray_N_x_Dim = deprecate_function(
as_array_n_by_dim, "asArray_N_x_Dim", removal_version="1.0.0"
)
| mit |
amolkahat/pandas | pandas/tests/util/test_testing.py | 1 | 35103 | # -*- coding: utf-8 -*-
import textwrap
import os
import pandas as pd
import pytest
import numpy as np
import sys
from pandas import Series, DataFrame
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import (assert_almost_equal, raise_with_traceback,
assert_index_equal, assert_series_equal,
assert_frame_equal, assert_numpy_array_equal,
RNGContext)
from pandas import compat
class TestAssertAlmostEqual(object):
def _assert_almost_equal_both(self, a, b, **kwargs):
assert_almost_equal(a, b, **kwargs)
assert_almost_equal(b, a, **kwargs)
def _assert_not_almost_equal_both(self, a, b, **kwargs):
pytest.raises(AssertionError, assert_almost_equal, a, b, **kwargs)
pytest.raises(AssertionError, assert_almost_equal, b, a, **kwargs)
def test_assert_almost_equal_numbers(self):
self._assert_almost_equal_both(1.1, 1.1)
self._assert_almost_equal_both(1.1, 1.100001)
self._assert_almost_equal_both(np.int16(1), 1.000001)
self._assert_almost_equal_both(np.float64(1.1), 1.1)
self._assert_almost_equal_both(np.uint32(5), 5)
self._assert_not_almost_equal_both(1.1, 1)
self._assert_not_almost_equal_both(1.1, True)
self._assert_not_almost_equal_both(1, 2)
self._assert_not_almost_equal_both(1.0001, np.int16(1))
def test_assert_almost_equal_numbers_with_zeros(self):
self._assert_almost_equal_both(0, 0)
self._assert_almost_equal_both(0, 0.0)
self._assert_almost_equal_both(0, np.float64(0))
self._assert_almost_equal_both(0.000001, 0)
self._assert_not_almost_equal_both(0.001, 0)
self._assert_not_almost_equal_both(1, 0)
def test_assert_almost_equal_numbers_with_mixed(self):
self._assert_not_almost_equal_both(1, 'abc')
self._assert_not_almost_equal_both(1, [1, ])
self._assert_not_almost_equal_both(1, object())
@pytest.mark.parametrize(
"left_dtype",
['M8[ns]', 'm8[ns]', 'float64', 'int64', 'object'])
@pytest.mark.parametrize(
"right_dtype",
['M8[ns]', 'm8[ns]', 'float64', 'int64', 'object'])
def test_assert_almost_equal_edge_case_ndarrays(
self, left_dtype, right_dtype):
# empty compare
self._assert_almost_equal_both(np.array([], dtype=left_dtype),
np.array([], dtype=right_dtype),
check_dtype=False)
def test_assert_almost_equal_dicts(self):
self._assert_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
self._assert_not_almost_equal_both({'a': 1, 'b': 2}, {'a': 1, 'b': 3})
self._assert_not_almost_equal_both({'a': 1, 'b': 2},
{'a': 1, 'b': 2, 'c': 3})
self._assert_not_almost_equal_both({'a': 1}, 1)
self._assert_not_almost_equal_both({'a': 1}, 'abc')
self._assert_not_almost_equal_both({'a': 1}, [1, ])
def test_assert_almost_equal_dict_like_object(self):
class DictLikeObj(object):
def keys(self):
return ('a', )
def __getitem__(self, item):
if item == 'a':
return 1
self._assert_almost_equal_both({'a': 1}, DictLikeObj(),
check_dtype=False)
self._assert_not_almost_equal_both({'a': 2}, DictLikeObj(),
check_dtype=False)
def test_assert_almost_equal_strings(self):
self._assert_almost_equal_both('abc', 'abc')
self._assert_not_almost_equal_both('abc', 'abcd')
self._assert_not_almost_equal_both('abc', 'abd')
self._assert_not_almost_equal_both('abc', 1)
self._assert_not_almost_equal_both('abc', [1, ])
def test_assert_almost_equal_iterables(self):
self._assert_almost_equal_both([1, 2, 3], [1, 2, 3])
self._assert_almost_equal_both(np.array([1, 2, 3]),
np.array([1, 2, 3]))
# class / dtype are different
self._assert_not_almost_equal_both(np.array([1, 2, 3]), [1, 2, 3])
self._assert_not_almost_equal_both(np.array([1, 2, 3]),
np.array([1., 2., 3.]))
# Can't compare generators
self._assert_not_almost_equal_both(iter([1, 2, 3]), [1, 2, 3])
self._assert_not_almost_equal_both([1, 2, 3], [1, 2, 4])
self._assert_not_almost_equal_both([1, 2, 3], [1, 2, 3, 4])
self._assert_not_almost_equal_both([1, 2, 3], 1)
def test_assert_almost_equal_null(self):
self._assert_almost_equal_both(None, None)
self._assert_not_almost_equal_both(None, np.NaN)
self._assert_not_almost_equal_both(None, 0)
self._assert_not_almost_equal_both(np.NaN, 0)
def test_assert_almost_equal_inf(self):
self._assert_almost_equal_both(np.inf, np.inf)
self._assert_almost_equal_both(np.inf, float("inf"))
self._assert_not_almost_equal_both(np.inf, 0)
self._assert_almost_equal_both(np.array([np.inf, np.nan, -np.inf]),
np.array([np.inf, np.nan, -np.inf]))
self._assert_almost_equal_both(np.array([np.inf, None, -np.inf],
dtype=np.object_),
np.array([np.inf, np.nan, -np.inf],
dtype=np.object_))
def test_assert_almost_equal_pandas(self):
tm.assert_almost_equal(pd.Index([1., 1.1]),
pd.Index([1., 1.100001]))
tm.assert_almost_equal(pd.Series([1., 1.1]),
pd.Series([1., 1.100001]))
tm.assert_almost_equal(pd.DataFrame({'a': [1., 1.1]}),
pd.DataFrame({'a': [1., 1.100001]}))
def test_assert_almost_equal_object(self):
a = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-01')]
b = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-01')]
self._assert_almost_equal_both(a, b)
class TestUtilTesting(object):
def test_raise_with_traceback(self):
with tm.assert_raises_regex(LookupError, "error_text"):
try:
raise ValueError("THIS IS AN ERROR")
except ValueError as e:
e = LookupError("error_text")
raise_with_traceback(e)
with tm.assert_raises_regex(LookupError, "error_text"):
try:
raise ValueError("This is another error")
except ValueError:
e = LookupError("error_text")
_, _, traceback = sys.exc_info()
raise_with_traceback(e, traceback)
def test_convert_rows_list_to_csv_str(self):
rows_list = ["aaa", "bbb", "ccc"]
ret = tm.convert_rows_list_to_csv_str(rows_list)
if compat.is_platform_windows():
expected = "aaa\r\nbbb\r\nccc\r\n"
else:
expected = "aaa\nbbb\nccc\n"
assert ret == expected
class TestAssertNumpyArrayEqual(object):
@td.skip_if_windows
def test_numpy_array_equal_message(self):
expected = """numpy array are different
numpy array shapes are different
\\[left\\]: \\(2,\\)
\\[right\\]: \\(3,\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]))
# scalar comparison
expected = """Expected type """
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(1, 2)
expected = """expected 2\\.00000 but got 1\\.00000, with decimal 5"""
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(1, 2)
# array / scalar array comparison
expected = """numpy array are different
numpy array classes are different
\\[left\\]: ndarray
\\[right\\]: int"""
with tm.assert_raises_regex(AssertionError, expected):
# numpy_array_equal only accepts np.ndarray
assert_numpy_array_equal(np.array([1]), 1)
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([1]), 1)
# scalar / array comparison
expected = """numpy array are different
numpy array classes are different
\\[left\\]: int
\\[right\\]: ndarray"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(1, np.array([1]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(1, np.array([1]))
expected = """numpy array are different
numpy array values are different \\(66\\.66667 %\\)
\\[left\\]: \\[nan, 2\\.0, 3\\.0\\]
\\[right\\]: \\[1\\.0, nan, 3\\.0\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([np.nan, 2, 3]),
np.array([1, np.nan, 3]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([np.nan, 2, 3]),
np.array([1, np.nan, 3]))
expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([1, 2]), np.array([1, 3]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([1, 2]), np.array([1, 3]))
expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[1\\.1, 2\\.000001\\]
\\[right\\]: \\[1\\.1, 2.0\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(
np.array([1.1, 2.000001]), np.array([1.1, 2.0]))
# must pass
assert_almost_equal(np.array([1.1, 2.000001]), np.array([1.1, 2.0]))
expected = """numpy array are different
numpy array values are different \\(16\\.66667 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([[1, 2], [3, 4], [5, 6]]),
np.array([[1, 3], [3, 4], [5, 6]]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([[1, 2], [3, 4], [5, 6]]),
np.array([[1, 3], [3, 4], [5, 6]]))
expected = """numpy array are different
numpy array values are different \\(25\\.0 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\]\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([[1, 2], [3, 4]]),
np.array([[1, 3], [3, 4]]))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([[1, 2], [3, 4]]),
np.array([[1, 3], [3, 4]]))
# allow to overwrite message
expected = """Index are different
Index shapes are different
\\[left\\]: \\(2,\\)
\\[right\\]: \\(3,\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]),
obj='Index')
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]),
obj='Index')
def test_numpy_array_equal_unicode_message(self):
# Test ensures that `assert_numpy_array_equals` raises the right
# exception when comparing np.arrays containing differing
# unicode objects (#20503)
expected = """numpy array are different
numpy array values are different \\(33\\.33333 %\\)
\\[left\\]: \\[á, à, ä\\]
\\[right\\]: \\[á, à, å\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(np.array([u'á', u'à', u'ä']),
np.array([u'á', u'à', u'å']))
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(np.array([u'á', u'à', u'ä']),
np.array([u'á', u'à', u'å']))
@td.skip_if_windows
def test_numpy_array_equal_object_message(self):
a = np.array([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-01')])
b = np.array([pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')])
expected = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[2011-01-01 00:00:00, 2011-01-01 00:00:00\\]
\\[right\\]: \\[2011-01-01 00:00:00, 2011-01-02 00:00:00\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(a, b)
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal(a, b)
def test_numpy_array_equal_copy_flag(self):
a = np.array([1, 2, 3])
b = a.copy()
c = a.view()
expected = r'array\(\[1, 2, 3\]\) is not array\(\[1, 2, 3\]\)'
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(a, b, check_same='same')
expected = r'array\(\[1, 2, 3\]\) is array\(\[1, 2, 3\]\)'
with tm.assert_raises_regex(AssertionError, expected):
assert_numpy_array_equal(a, c, check_same='copy')
def test_assert_almost_equal_iterable_message(self):
expected = """Iterable are different
Iterable length are different
\\[left\\]: 2
\\[right\\]: 3"""
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal([1, 2], [3, 4, 5])
expected = """Iterable are different
Iterable values are different \\(50\\.0 %\\)
\\[left\\]: \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_almost_equal([1, 2], [1, 3])
class TestAssertIndexEqual(object):
def test_index_equal_message(self):
expected = """Index are different
Index levels are different
\\[left\\]: 1, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 2, MultiIndex\\(levels=\\[\\[u?'A', u?'B'\\], \\[1, 2, 3, 4\\]\\],
labels=\\[\\[0, 0, 1, 1\\], \\[0, 1, 2, 3\\]\\]\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2),
('B', 3), ('B', 4)])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, exact=False)
expected = """MultiIndex level \\[1\\] are different
MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = pd.MultiIndex.from_tuples([('A', 2), ('A', 2),
('B', 3), ('B', 4)])
idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2),
('B', 3), ('B', 4)])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
expected = """Index are different
Index length are different
\\[left\\]: 3, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 4, Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 3, 4])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
expected = """Index are different
Index classes are different
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Float64Index\\(\\[1\\.0, 2\\.0, 3\\.0\\], dtype='float64'\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 3.0])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, exact=True)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, exact=True, check_exact=False)
expected = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0000000001\\], dtype='float64'\\)"""
idx1 = pd.Index([1, 2, 3.])
idx2 = pd.Index([1, 2, 3.0000000001])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
        # must succeed
assert_index_equal(idx1, idx2, check_exact=False)
expected = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0001\\], dtype='float64'\\)"""
idx1 = pd.Index([1, 2, 3.])
idx2 = pd.Index([1, 2, 3.0001])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
        # must succeed
assert_index_equal(idx1, idx2, check_exact=False,
check_less_precise=True)
expected = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 4\\], dtype='int64'\\)"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 4])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, check_less_precise=True)
expected = """MultiIndex level \\[1\\] are different
MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = pd.MultiIndex.from_tuples([('A', 2), ('A', 2),
('B', 3), ('B', 4)])
idx2 = pd.MultiIndex.from_tuples([('A', 1), ('A', 2),
('B', 3), ('B', 4)])
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2, check_exact=False)
def test_index_equal_metadata_message(self):
expected = """Index are different
Attribute "names" are different
\\[left\\]: \\[None\\]
\\[right\\]: \\[u?'x'\\]"""
idx1 = pd.Index([1, 2, 3])
idx2 = pd.Index([1, 2, 3], name='x')
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
# same name, should pass
assert_index_equal(pd.Index([1, 2, 3], name=np.nan),
pd.Index([1, 2, 3], name=np.nan))
assert_index_equal(pd.Index([1, 2, 3], name=pd.NaT),
pd.Index([1, 2, 3], name=pd.NaT))
expected = """Index are different
Attribute "names" are different
\\[left\\]: \\[nan\\]
\\[right\\]: \\[NaT\\]"""
idx1 = pd.Index([1, 2, 3], name=np.nan)
idx2 = pd.Index([1, 2, 3], name=pd.NaT)
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(idx1, idx2)
def test_categorical_index_equality(self):
expected = """Index are different
Attribute "dtype" are different
\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\)
\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b', u?'c'\\], \
ordered=False\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])),
pd.Index(pd.Categorical(['a', 'b'],
categories=['a', 'b', 'c'])))
def test_categorical_index_equality_relax_categories_check(self):
assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])),
pd.Index(pd.Categorical(['a', 'b'],
categories=['a', 'b', 'c'])),
check_categorical=False)
class TestAssertSeriesEqual(object):
def _assert_equal(self, x, y, **kwargs):
assert_series_equal(x, y, **kwargs)
assert_series_equal(y, x, **kwargs)
def _assert_not_equal(self, a, b, **kwargs):
pytest.raises(AssertionError, assert_series_equal, a, b, **kwargs)
pytest.raises(AssertionError, assert_series_equal, b, a, **kwargs)
def test_equal(self):
self._assert_equal(Series(range(3)), Series(range(3)))
self._assert_equal(Series(list('abc')), Series(list('abc')))
self._assert_equal(Series(list(u'áàä')), Series(list(u'áàä')))
def test_not_equal(self):
self._assert_not_equal(Series(range(3)), Series(range(3)) + 1)
self._assert_not_equal(Series(list('abc')), Series(list('xyz')))
self._assert_not_equal(Series(list(u'áàä')), Series(list(u'éèë')))
self._assert_not_equal(Series(list(u'áàä')), Series(list(b'aaa')))
self._assert_not_equal(Series(range(3)), Series(range(4)))
self._assert_not_equal(
Series(range(3)), Series(
range(3), dtype='float64'))
self._assert_not_equal(
Series(range(3)), Series(
range(3), index=[1, 2, 4]))
        # ATM metadata is not checked in assert_series_equal
# self._assert_not_equal(Series(range(3)),Series(range(3),name='foo'),check_names=True)
def test_less_precise(self):
s1 = Series([0.12345], dtype='float64')
s2 = Series([0.12346], dtype='float64')
pytest.raises(AssertionError, assert_series_equal, s1, s2)
self._assert_equal(s1, s2, check_less_precise=True)
for i in range(4):
self._assert_equal(s1, s2, check_less_precise=i)
pytest.raises(AssertionError, assert_series_equal, s1, s2, 10)
s1 = Series([0.12345], dtype='float32')
s2 = Series([0.12346], dtype='float32')
pytest.raises(AssertionError, assert_series_equal, s1, s2)
self._assert_equal(s1, s2, check_less_precise=True)
for i in range(4):
self._assert_equal(s1, s2, check_less_precise=i)
pytest.raises(AssertionError, assert_series_equal, s1, s2, 10)
# even less than less precise
s1 = Series([0.1235], dtype='float32')
s2 = Series([0.1236], dtype='float32')
pytest.raises(AssertionError, assert_series_equal, s1, s2)
pytest.raises(AssertionError, assert_series_equal, s1, s2, True)
def test_index_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'c': ['l1', 'l2']}, index=['a'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'c': ['l1', 'l2']}, index=['a'])
self._assert_not_equal(df1.c, df2.c, check_index_type=True)
def test_multiindex_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
self._assert_not_equal(df1.c, df2.c, check_index_type=True)
def test_series_equal_message(self):
expected = """Series are different
Series length are different
\\[left\\]: 3, RangeIndex\\(start=0, stop=3, step=1\\)
\\[right\\]: 4, RangeIndex\\(start=0, stop=4, step=1\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3, 4]))
expected = """Series are different
Series values are different \\(33\\.33333 %\\)
\\[left\\]: \\[1, 2, 3\\]
\\[right\\]: \\[1, 2, 4\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]))
with tm.assert_raises_regex(AssertionError, expected):
assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]),
check_less_precise=True)
def test_categorical_series_equality(self):
expected = """Attributes are different
Attribute "dtype" are different
\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\)
\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b', u?'c'\\], \
ordered=False\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])),
pd.Series(pd.Categorical(['a', 'b'],
categories=['a', 'b', 'c'])))
def test_categorical_series_equality_relax_categories_check(self):
assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])),
pd.Series(pd.Categorical(['a', 'b'],
categories=['a', 'b', 'c'])),
check_categorical=False)
class TestAssertFrameEqual(object):
def _assert_equal(self, x, y, **kwargs):
assert_frame_equal(x, y, **kwargs)
assert_frame_equal(y, x, **kwargs)
def _assert_not_equal(self, a, b, **kwargs):
pytest.raises(AssertionError, assert_frame_equal, a, b, **kwargs)
pytest.raises(AssertionError, assert_frame_equal, b, a, **kwargs)
def test_equal_with_different_row_order(self):
# check_like=True ignores row-column orderings
df1 = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'c'])
df2 = pd.DataFrame({'A': [3, 2, 1], 'B': [6, 5, 4]},
index=['c', 'b', 'a'])
self._assert_equal(df1, df2, check_like=True)
self._assert_not_equal(df1, df2)
def test_not_equal_with_different_shape(self):
self._assert_not_equal(pd.DataFrame({'A': [1, 2, 3]}),
pd.DataFrame({'A': [1, 2, 3, 4]}))
def test_index_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'c': ['l1', 'l2']}, index=['a'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'c': ['l1', 'l2']}, index=['a'])
self._assert_not_equal(df1, df2, check_index_type=True)
def test_multiindex_dtype(self):
df1 = DataFrame.from_records(
{'a': [1, 2], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
df2 = DataFrame.from_records(
{'a': [1.0, 2.0], 'b': [2.1, 1.5],
'c': ['l1', 'l2']}, index=['a', 'b'])
self._assert_not_equal(df1, df2, check_index_type=True)
def test_empty_dtypes(self):
df1 = pd.DataFrame(columns=["col1", "col2"])
df1["col1"] = df1["col1"].astype('int64')
df2 = pd.DataFrame(columns=["col1", "col2"])
self._assert_equal(df1, df2, check_dtype=False)
self._assert_not_equal(df1, df2, check_dtype=True)
def test_frame_equal_message(self):
expected = """DataFrame are different
DataFrame shape mismatch
\\[left\\]: \\(3, 2\\)
\\[right\\]: \\(3, 1\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),
pd.DataFrame({'A': [1, 2, 3]}))
expected = """DataFrame\\.index are different
DataFrame\\.index values are different \\(33\\.33333 %\\)
\\[left\\]: Index\\(\\[u?'a', u?'b', u?'c'\\], dtype='object'\\)
\\[right\\]: Index\\(\\[u?'a', u?'b', u?'d'\\], dtype='object'\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'c']),
pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'd']))
expected = """DataFrame\\.columns are different
DataFrame\\.columns values are different \\(50\\.0 %\\)
\\[left\\]: Index\\(\\[u?'A', u?'B'\\], dtype='object'\\)
\\[right\\]: Index\\(\\[u?'A', u?'b'\\], dtype='object'\\)"""
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['a', 'b', 'c']),
pd.DataFrame({'A': [1, 2, 3], 'b': [4, 5, 6]},
index=['a', 'b', 'c']))
expected = """DataFrame\\.iloc\\[:, 1\\] are different
DataFrame\\.iloc\\[:, 1\\] values are different \\(33\\.33333 %\\)
\\[left\\]: \\[4, 5, 6\\]
\\[right\\]: \\[4, 5, 7\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),
pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 7]}))
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}),
pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 7]}),
by_blocks=True)
def test_frame_equal_message_unicode(self):
# Test ensures that `assert_frame_equals` raises the right
# exception when comparing DataFrames containing differing
# unicode objects (#20503)
expected = """DataFrame\\.iloc\\[:, 1\\] are different
DataFrame\\.iloc\\[:, 1\\] values are different \\(33\\.33333 %\\)
\\[left\\]: \\[é, è, ë\\]
\\[right\\]: \\[é, è, e̊\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [u'á', u'à', u'ä'],
'E': [u'é', u'è', u'ë']}),
pd.DataFrame({'A': [u'á', u'à', u'ä'],
'E': [u'é', u'è', u'e̊']}))
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [u'á', u'à', u'ä'],
'E': [u'é', u'è', u'ë']}),
pd.DataFrame({'A': [u'á', u'à', u'ä'],
'E': [u'é', u'è', u'e̊']}),
by_blocks=True)
expected = """DataFrame\\.iloc\\[:, 0\\] are different
DataFrame\\.iloc\\[:, 0\\] values are different \\(100\\.0 %\\)
\\[left\\]: \\[á, à, ä\\]
\\[right\\]: \\[a, a, a\\]"""
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [u'á', u'à', u'ä'],
'E': [u'é', u'è', u'ë']}),
pd.DataFrame({'A': ['a', 'a', 'a'],
'E': ['e', 'e', 'e']}))
with tm.assert_raises_regex(AssertionError, expected):
assert_frame_equal(pd.DataFrame({'A': [u'á', u'à', u'ä'],
'E': [u'é', u'è', u'ë']}),
pd.DataFrame({'A': ['a', 'a', 'a'],
'E': ['e', 'e', 'e']}),
by_blocks=True)
class TestAssertCategoricalEqual(object):
def test_categorical_equal_message(self):
expected = """Categorical\\.categories are different
Categorical\\.categories values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 5\\], dtype='int64'\\)"""
a = pd.Categorical([1, 2, 3, 4])
b = pd.Categorical([1, 2, 3, 5])
with tm.assert_raises_regex(AssertionError, expected):
tm.assert_categorical_equal(a, b)
expected = """Categorical\\.codes are different
Categorical\\.codes values are different \\(50\\.0 %\\)
\\[left\\]: \\[0, 1, 3, 2\\]
\\[right\\]: \\[0, 1, 2, 3\\]"""
a = pd.Categorical([1, 2, 4, 3], categories=[1, 2, 3, 4])
b = pd.Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
with tm.assert_raises_regex(AssertionError, expected):
tm.assert_categorical_equal(a, b)
expected = """Categorical are different
Attribute "ordered" are different
\\[left\\]: False
\\[right\\]: True"""
a = pd.Categorical([1, 2, 3, 4], ordered=False)
b = pd.Categorical([1, 2, 3, 4], ordered=True)
with tm.assert_raises_regex(AssertionError, expected):
tm.assert_categorical_equal(a, b)
class TestAssertIntervalArrayEqual(object):
def test_interval_array_equal_message(self):
a = pd.interval_range(0, periods=4).values
b = pd.interval_range(1, periods=4).values
msg = textwrap.dedent("""\
IntervalArray.left are different
IntervalArray.left values are different \\(100.0 %\\)
\\[left\\]: Int64Index\\(\\[0, 1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)""")
with tm.assert_raises_regex(AssertionError, msg):
tm.assert_interval_array_equal(a, b)
class TestRNGContext(object):
def test_RNGContext(self):
expected0 = 1.764052345967664
expected1 = 1.6243453636632417
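        # expected0/expected1 are the first draws from numpy's global
        # generator seeded with 0 and 1; exiting the inner context must
        # restore the outer seed-0 state, which the asserts below verify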
with RNGContext(0):
with RNGContext(1):
assert np.random.randn() == expected1
assert np.random.randn() == expected0
def test_datapath_missing(datapath, request):
if not request.config.getoption("--strict-data-files"):
pytest.skip("Need to set '--strict-data-files'")
with pytest.raises(ValueError):
datapath('not_a_file')
result = datapath('data', 'iris.csv')
expected = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'data',
'iris.csv'
)
assert result == expected
| bsd-3-clause |
rvraghav93/scikit-learn | sklearn/ensemble/gradient_boosting.py | 4 | 76278 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseEnsemble
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy import stats
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from scipy.special import expit
from time import time
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE
from ..tree._tree import TREE_LEAF
from ..utils import check_random_state
from ..utils import check_array
from ..utils import check_X_y
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils.fixes import logsumexp
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
class QuantileEstimator(object):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight,
self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(object):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(object):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(object):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = np.bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(object):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions need not to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
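        # leaf estimate: the weighted median of the within-leaf differences
        # (y - pred) plus the mean of the deviations from that median, each
        # clipped at gamma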
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() -
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
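        # negative gradient of the pinball loss: alpha where y > pred,
        # -(1 - alpha) otherwise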
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        Loss functions that do not support probabilities leave this
        default implementation, which raises a TypeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
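        # since residual = y - prob, the prob * (1 - prob) term of the Newton
        # denominator expands to (y - residual) * (1 - y + residual)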
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
# prevents overflow and division by zero
if abs(denominator) < 1e-150:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
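        # the (K - 1) / K factor is the multi-class correction from Friedman's
        # K-class gradient boosting update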
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
# prevents overflow and division by zero
if abs(denominator) < 1e-150:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
# prevents overflow and division by zero
if abs(denominator) < 1e-150:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
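# Illustrative lookup (editorial sketch): LOSS_FUNCTIONS['huber'](1, alpha=0.9)
# builds a HuberLossFunction for single-output regression; 'deviance' maps to
# None here and is resolved to Binomial- or MultinomialDeviance elsewhere,
# depending on the number of classes.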
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
verbose_mod is zero).; if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
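# --- Hedged illustration, not part of the original module -------------------
# With ``verbose == 1`` the reporter thins its output: every iteration is shown
# up to iteration 10, then every 10th, then every 100th, and so on.  A minimal
# sketch of that schedule; the helper name is illustrative only.
def _demo_verbose_schedule(n_iterations=250):
    printed, verbose_mod = [], 1
    for i in range(n_iterations):
        if (i + 1) % verbose_mod == 0:
            printed.append(i + 1)
            if (i + 1) // (verbose_mod * 10) > 0:
                verbose_mod *= 10
    return printed   # -> [1, 2, ..., 10, 20, ..., 100, 200]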
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, criterion,
min_samples_split, min_samples_leaf, min_weight_fraction_leaf,
max_depth, min_impurity_decrease, min_impurity_split,
init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.criterion = criterion
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
        assert sample_mask.dtype == bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_decrease=self.min_impurity_decrease,
min_impurity_split=self.min_impurity_split,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
if X_csc is not None:
tree.fit(X_csc, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
else:
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# update tree leaves
if X_csr is not None:
loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
# is regression
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features *
self.n_features_), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
                                    dtype=object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
            self.estimators_ = np.empty((0, 0), dtype=object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
            raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
check_is_fitted(self, 'estimators_')
@property
@deprecated("Attribute n_features was deprecated in version 0.19 and "
"will be removed in 0.21.")
def n_features(self):
return self.n_features_
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features_ = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
X_idx_sorted = None
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto' and issparse(X):
presort = False
elif presort == 'auto':
presort = True
if presort == True:
if issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
else:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
        sample_mask = np.ones((n_samples, ), dtype=bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features_, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features_, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
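# --- Hedged illustration, not part of the original module -------------------
# The core of ``_fit_stage`` above: fit a regression tree to the negative
# gradient (the residuals) and move the current predictions by
# ``learning_rate`` times the tree output.  A simplified squared-loss sketch
# that skips the per-leaf line search, subsampling and sparse handling; names
# are illustrative only.
def _demo_boosting_stage(X, y, y_pred, learning_rate=0.1, max_depth=3):
    from sklearn.tree import DecisionTreeRegressor
    residual = y - y_pred                 # negative gradient of squared loss
    tree = DecisionTreeRegressor(max_depth=max_depth)
    tree.fit(X, residual)
    return y_pred + learning_rate * tree.predict(X), tree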
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
criterion : string, optional (default="friedman_mse")
The function to measure the quality of a split. Supported criteria
are "friedman_mse" for the mean squared error with improvement
score by Friedman, "mse" for mean squared error, and "mae" for
the mean absolute error. The default value of "friedman_mse" is
generally the best as it can provide a better approximation in
some cases.
.. versionadded:: 0.18
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
*presort* parameter.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data and
``max_features=n_features``, if the improvement of the criterion is
identical for several splits enumerated during the search of the best
split. To obtain a deterministic behaviour during fitting,
``random_state`` has to be fixed.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, criterion='friedman_mse', min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, min_impurity_decrease=0.,
min_impurity_split=None, init=None,
random_state=None, max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto'):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
criterion=criterion, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
warm_start=warm_start,
presort=presort)
def _validate_y(self, y):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
        p : generator of arrays of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
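# --- Hedged illustration, not part of the original module -------------------
# Typical usage of the classifier above, including staged probability
# estimates for monitoring a held-out split after every boosting stage.
# A minimal sketch with synthetic data; the helper name is illustrative only.
def _demo_gradient_boosting_classifier():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(200, 4)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
                                     max_depth=3, random_state=0)
    clf.fit(X[:150], y[:150])
    staged_acc = [np.mean(np.argmax(proba, axis=1) == y[150:])
                  for proba in clf.staged_predict_proba(X[150:])]
    return clf.predict(X[150:]), staged_acc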
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
criterion : string, optional (default="friedman_mse")
The function to measure the quality of a split. Supported criteria
are "friedman_mse" for the mean squared error with improvement
score by Friedman, "mse" for mean squared error, and "mae" for
the mean absolute error. The default value of "friedman_mse" is
generally the best as it can provide a better approximation in
some cases.
.. versionadded:: 0.18
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
optional parameter *presort*.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data and
``max_features=n_features``, if the improvement of the criterion is
identical for several splits enumerated during the search of the best
split. To obtain a deterministic behaviour during fitting,
``random_state`` has to be fixed.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, criterion='friedman_mse', min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, min_impurity_decrease=0.,
min_impurity_split=None, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
criterion=criterion, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
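# --- Hedged illustration, not part of the original module -------------------
# Typical usage of the regressor above, using the quantile loss with two
# values of ``alpha`` to obtain a rough prediction interval.  A minimal sketch
# with synthetic data; the helper name is illustrative only.
def _demo_gradient_boosting_regressor():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 10, size=(300, 1))
    y = np.sin(X[:, 0]) + rng.normal(scale=0.3, size=300)
    lower = GradientBoostingRegressor(loss='quantile', alpha=0.1,
                                      n_estimators=100).fit(X, y)
    upper = GradientBoostingRegressor(loss='quantile', alpha=0.9,
                                      n_estimators=100).fit(X, y)
    return lower.predict(X), upper.predict(X)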
| bsd-3-clause |
rishikksh20/scikit-learn | examples/svm/plot_svm_margin.py | 88 | 2540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separating line. A large value of `C` tells the model to
place less trust in the data's overall distribution and to consider
only points close to the line of separation when fitting.
A small value of `C` takes more (or all) of the observations into
account, so the margins are computed using data from a wider area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors (margin away from hyperplane in direction
# perpendicular to hyperplane). This is sqrt(1+a^2) away vertically in
# 2-d.
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy - np.sqrt(1 + a ** 2) * margin
yy_up = yy + np.sqrt(1 + a ** 2) * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
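# --- Hedged addition, not part of the original example ----------------------
# Sanity check of the margin used above: for a linear SVM the distance from
# the separating hyperplane to its support vectors is at most 1 / ||w||
# (margin violators can be closer when C is small).  This reuses the last
# ``clf`` fitted in the loop.
sv_dist = np.abs(clf.decision_function(clf.support_vectors_)) / np.linalg.norm(clf.coef_)
print("geometric margin 1/||w|| = %.3f" % (1.0 / np.linalg.norm(clf.coef_)))
print("support vector distances to the hyperplane: %s" % np.round(sv_dist, 3))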
| bsd-3-clause |
nbeaver/numpy | doc/example.py | 81 | 3581 | """This is the docstring for the example.py module. Module names should
be short and all-lowercase. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead, or import individual functions as needed, e.g.
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi'):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
    >>> a = [1, 2, 3]
    >>> print([x + 3 for x in a])
    [4, 5, 6]
    >>> print("a\n\nb")
    a
    <BLANKLINE>
    b
"""
pass
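# --- Hedged addition, not part of the original example file -----------------
# A second, deliberately small function showing the same docstring standard
# applied to a short helper; added purely as an extra illustration.
def bar(x):
    """Return the square of `x`.

    Parameters
    ----------
    x : float
        Value to square.

    Returns
    -------
    float
        The value ``x * x``.

    Examples
    --------
    >>> bar(3.0)
    9.0
    """
    return x * x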
| bsd-3-clause |
f2nd/yandex-tank | yandextank/plugins/Console/screen.py | 1 | 40733 | # -*- coding: utf-8 -*-
""" Classes to build full console screen """
import fcntl
import logging
import os
import struct
import termios
import time
import bisect
from collections import defaultdict
import pandas as pd
from ...common import util
def get_terminal_size():
"""
Gets width and height of terminal viewport
"""
default_size = (30, 120)
env = os.environ
def ioctl_gwinsz(file_d):
"""
Helper to get console size
"""
try:
sizes = struct.unpack(
                'hh', fcntl.ioctl(file_d, termios.TIOCGWINSZ, b'1234'))
except Exception:
sizes = default_size
return sizes
sizes = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
if not sizes:
try:
file_d = os.open(os.ctermid(), os.O_RDONLY)
sizes = ioctl_gwinsz(file_d)
            os.close(file_d)  # os.open() returns a raw file descriptor, not a file object
except Exception:
pass
if not sizes:
try:
sizes = (env['LINES'], env['COLUMNS'])
except Exception:
sizes = default_size
return int(sizes[1]), int(sizes[0])
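# --- Hedged illustration, not part of the original module -------------------
# On Python 3.3+ the standard library offers a simpler, portable alternative
# to the ioctl dance above; shown only for comparison, the module itself does
# not use it.
def _demo_stdlib_terminal_size():
    import shutil
    cols, rows = shutil.get_terminal_size(fallback=(120, 30))
    return cols, rows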
def safe_div(summ, count):
if count == 0:
return 0
else:
return float(summ) / count
def str_len(n):
return len(str(n))
def avg_from_dict(src):
result = {}
count = src['count']
for k, v in src.items():
if k != 'count':
result[k] = safe_div(v, count)
return result
def krutilka():
pos = 0
chars = "|/-\\"
while True:
yield chars[pos]
pos += 1
if pos >= len(chars):
pos = 0
def try_color(old, new, markup):
order = [
markup.WHITE, markup.GREEN, markup.CYAN,
markup.YELLOW, markup.MAGENTA, markup.RED]
if not old:
return new
else:
if order.index(old) > order.index(new):
return old
else:
return new
def combine_codes(tag_data, markup):
net_err, http_err = 0, 0
color = ''
net_codes = tag_data['net_code']['count']
for code, count in sorted(net_codes.items()):
if count > 0:
if int(code) == 0:
continue
elif int(code) == 314:
color = try_color(color, markup.MAGENTA, markup)
net_err += count
else:
color = try_color(color, markup.RED, markup)
net_err += count
http_codes = tag_data['proto_code']['count']
for code, count in sorted(http_codes.items()):
if count > 0:
if 100 <= int(code) <= 299:
color = try_color(color, markup.GREEN, markup)
elif 300 <= int(code) <= 399:
color = try_color(color, markup.CYAN, markup)
elif 400 <= int(code) <= 499:
color = try_color(color, markup.YELLOW, markup)
http_err += count
elif 500 <= int(code) <= 599:
color = try_color(color, markup.RED, markup)
http_err += count
else:
color = try_color(color, markup.MAGENTA, markup)
http_err += count
return net_err, http_err, color
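# --- Hedged illustration, not part of the original module -------------------
# ``combine_codes`` expects per-second aggregates with ``net_code`` and
# ``proto_code`` counters plus a markup provider exposing colour constants.
# A minimal sketch with a stand-in markup namespace; all names and counts are
# illustrative only.
def _demo_combine_codes():
    class _FakeMarkup(object):
        WHITE, GREEN, CYAN, YELLOW, MAGENTA, RED = 'w', 'g', 'c', 'y', 'm', 'r'
    tag_data = {
        'net_code': {'count': {'0': 90, '110': 2}},      # 2 network timeouts
        'proto_code': {'count': {'200': 80, '503': 8}},  # 8 HTTP 5xx errors
    }
    net_err, http_err, color = combine_codes(tag_data, _FakeMarkup)
    return net_err, http_err, color   # -> (2, 8, 'r')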
class TableFormatter(object):
def __init__(self, template, delimiters, reshape_delay=5):
self.log = logging.getLogger(__name__)
self.template = template
self.delimiters = delimiters
self.default_final = '{{{:}:>{len}}}'
self.last_reshaped = time.time()
self.old_shape = {}
self.reshape_delay = reshape_delay
def __delimiter_gen(self):
for d in self.delimiters:
yield d
while True:
yield self.delimiters[-1]
def __prepare(self, data):
prepared = []
shape = {}
for line in data:
new = {}
for f in line:
if f in self.template:
new[f] = self.template[f]['tpl'].format(line[f])
if f not in shape:
shape[f] = len(new[f])
else:
shape[f] = max(shape[f], len(new[f]))
prepared.append(new)
return prepared, shape
def __update_shape(self, shape):
def change_shape():
self.last_reshaped = time.time()
self.old_shape = shape
if set(shape.keys()) != set(self.old_shape.keys()):
change_shape()
return shape
else:
for f in shape:
if shape[f] > self.old_shape[f]:
change_shape()
return shape
elif shape[f] < self.old_shape[f]:
if time.time() > (self.last_reshaped + self.reshape_delay):
change_shape()
return shape
return self.old_shape
def render_table(self, data, fields):
prepared, shape = self.__prepare(data)
headers = {}
for f in shape:
if 'header' in self.template[f]:
headers[f] = self.template[f]['header']
shape[f] = max(shape[f], len(headers[f]))
else:
headers[f] = ''
shape = self.__update_shape(shape)
has_headers = any(headers.values())
delimiter_gen = self.__delimiter_gen()
row_tpl = ''
for num, field in enumerate(fields):
if 'final' in self.template[field]:
final = self.template[field]['final']
else:
final = self.default_final
row_tpl += final.format(field, len=shape[field])
if num < len(fields) - 1:
row_tpl += next(delimiter_gen)
result = []
if has_headers:
result.append(
(row_tpl.format(**headers),))
for line in prepared:
result.append(
(row_tpl.format(**line),))
return result
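# --- Hedged illustration, not part of the original module -------------------
# ``TableFormatter`` takes a per-field template (format string plus optional
# header) and a list of column delimiters, then renders rows of dicts into
# fixed-width lines.  A minimal sketch; field names and values are
# illustrative only.
def _demo_table_formatter():
    template = {
        'tag': {'tpl': '{}', 'header': 'tag'},
        'rps': {'tpl': '{:.1f}', 'header': 'rps'},
    }
    formatter = TableFormatter(template, delimiters=[' | '])
    data = [{'tag': 'search', 'rps': 120.0}, {'tag': 'cart', 'rps': 35.5}]
    return formatter.render_table(data, fields=['tag', 'rps'])
    # -> [('   tag |   rps',), ('search | 120.0',), ('  cart |  35.5',)]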
class Sparkline(object):
def __init__(self, window):
self.log = logging.getLogger(__name__)
self.data = {}
self.window = window
self.active_seconds = []
self.ticks = '_▁▂▃▄▅▆▇'
def recalc_active(self, ts):
if not self.active_seconds:
self.active_seconds.append(ts)
self.data[ts] = {}
if ts not in self.active_seconds:
if ts > max(self.active_seconds):
for i in range(max(self.active_seconds) + 1, ts + 1):
self.active_seconds.append(i)
self.active_seconds.sort()
self.data[i] = {}
while len(self.active_seconds) > self.window:
self.active_seconds.pop(0)
for sec in list(self.data.keys()):
if sec not in self.active_seconds:
self.data.pop(sec)
def get_key_data(self, key):
result = []
if not self.active_seconds:
return None
for sec in self.active_seconds:
if key in self.data[sec]:
result.append(self.data[sec][key])
else:
result.append(('', 0))
return result
def add(self, ts, key, value, color=''):
if ts not in self.data:
self.recalc_active(ts)
if ts < min(self.active_seconds):
self.log.warning('Sparkline got outdated second %s, oldest in list %s', ts, min(self.active_seconds))
return
value = max(value, 0)
self.data[ts][key] = (color, value)
def get_sparkline(self, key, baseline='zero', spark_len='auto', align='right'):
if spark_len == 'auto':
spark_len = self.window
elif spark_len <= 0:
return ''
key_data = self.get_key_data(key)
if not key_data:
return ''
active_data = key_data[-spark_len:]
result = []
if active_data:
values = [i[1] for i in active_data]
if baseline == 'zero':
min_val = 0
step = float(max(values)) / len(self.ticks)
elif baseline == 'min':
min_val = min(values)
step = float(max(values) - min_val) / len(self.ticks)
ranges = [step * i for i in range(len(self.ticks) + 1)]
for color, value in active_data:
if value <= 0:
tick = ' '
else:
rank = bisect.bisect_left(ranges, value) - 1
rank = max(rank, 0)
rank = min(rank, len(self.ticks) - 1)
tick = self.ticks[rank]
result.append(color)
result.append(tick)
space = ' ' * (spark_len - len(active_data))
if align == 'right':
result = [space] + result
elif align == 'left':
result = result + [space]
return result
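# --- Hedged illustration, not part of the original module -------------------
# ``Sparkline`` keeps a sliding window of per-second values per key and
# renders them as unicode tick characters; colour markers are interleaved into
# the returned list.  A minimal sketch with an empty colour string; the helper
# name and numbers are illustrative only.
def _demo_sparkline():
    spark = Sparkline(window=5)
    for ts, rps in enumerate([10, 40, 80, 20, 60], start=1):
        spark.add(ts, 'rps', rps, color='')
    return ''.join(spark.get_sparkline('rps'))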
class Screen(object):
""" Console screen renderer class """
RIGHT_PANEL_SEPARATOR = ' . '
def __init__(self, info_panel_width, markup_provider, **kwargs):
self.log = logging.getLogger(__name__)
self.info_panel_percent = int(info_panel_width)
self.info_widgets = {}
self.markup = markup_provider
self.term_height = 60
self.term_width = 120
self.right_panel_width = 10
self.left_panel_width = self.term_width - self.right_panel_width - len(
self.RIGHT_PANEL_SEPARATOR)
cases_args = dict(
[(k, v)
for k, v in kwargs.items()
if k in ['cases_sort_by', 'cases_max_spark', 'max_case_len']]
)
times_args = {'times_max_spark': kwargs['times_max_spark']}
sizes_args = {'sizes_max_spark': kwargs['sizes_max_spark']}
codes_block = VerticalBlock(CurrentHTTPBlock(self), CurrentNetBlock(self), self)
times_block = VerticalBlock(AnswSizesBlock(self, **sizes_args), AvgTimesBlock(self, **times_args), self)
top_block = VerticalBlock(codes_block, times_block, self)
general_block = HorizontalBlock(PercentilesBlock(self), top_block, self)
overall_block = VerticalBlock(RPSBlock(self), general_block, self)
final_block = VerticalBlock(overall_block, CasesBlock(self, **cases_args), self)
self.left_panel = final_block
def __get_right_line(self, widget_output):
""" Gets next line for right panel """
right_line = ''
if widget_output:
right_line = widget_output.pop(0)
if len(right_line) > self.right_panel_width:
right_line_plain = self.markup.clean_markup(right_line)
if len(right_line_plain) > self.right_panel_width:
right_line = right_line[:self.right_panel_width] + self.markup.RESET
return right_line
def __truncate(self, line_arr, max_width):
""" Cut tuple of line chunks according to it's wisible lenght """
def is_space(chunk):
            return all(i == ' ' for i in chunk)
def is_empty(chunks, markups):
result = []
for chunk in chunks:
if chunk in markups:
result.append(True)
elif is_space(chunk):
result.append(True)
else:
result.append(False)
return all(result)
left = max_width
result = ''
markups = self.markup.get_markup_vars()
for num, chunk in enumerate(line_arr):
if chunk in markups:
result += chunk
else:
if left > 0:
if len(chunk) <= left:
result += chunk
left -= len(chunk)
else:
leftover = (chunk[left:],) + line_arr[num + 1:]
was_cut = not is_empty(leftover, markups)
if was_cut:
result += chunk[:left - 1] + self.markup.RESET + '\u2026'
else:
result += chunk[:left]
left = 0
return result
def __render_left_panel(self):
""" Render left blocks """
self.log.debug("Rendering left blocks")
left_block = self.left_panel
left_block.render()
blank_space = self.left_panel_width - left_block.width
lines = []
pre_space = ' ' * int(blank_space / 2)
if not left_block.lines:
lines = [(''), (self.markup.RED + 'BROKEN LEFT PANEL' + self.markup.RESET)]
else:
while self.left_panel.lines:
src_line = self.left_panel.lines.pop(0)
line = pre_space + self.__truncate(src_line, self.left_panel_width)
post_space = ' ' * (self.left_panel_width - len(self.markup.clean_markup(line)))
line += post_space + self.markup.RESET
lines.append(line)
return lines
def render_screen(self):
""" Main method to render screen view """
self.term_width, self.term_height = get_terminal_size()
self.log.debug(
"Terminal size: %sx%s", self.term_width, self.term_height)
self.right_panel_width = int(
(self.term_width - len(self.RIGHT_PANEL_SEPARATOR))
* (float(self.info_panel_percent) / 100)) - 1
if self.right_panel_width > 0:
self.left_panel_width = self.term_width - \
self.right_panel_width - len(self.RIGHT_PANEL_SEPARATOR) - 2
else:
self.right_panel_width = 0
self.left_panel_width = self.term_width - 1
self.log.debug(
"Left/right panels width: %s/%s", self.left_panel_width,
self.right_panel_width)
widget_output = []
if self.right_panel_width:
            self.log.debug("There are %d info widgets", len(self.info_widgets))
for index, widget in sorted(
iter(self.info_widgets.items()),
key=lambda item: (item[1].get_index(), item[0])):
self.log.debug("Rendering info widget #%s: %s", index, widget)
widget_out = widget.render(self).strip()
if widget_out:
widget_output += widget_out.split("\n")
widget_output += [""]
left_lines = self.__render_left_panel()
self.log.debug("Composing final screen output")
output = []
for line_no in range(1, self.term_height):
line = " "
if line_no > 1 and left_lines:
left_line = left_lines.pop(0)
left_line_plain = self.markup.clean_markup(left_line)
left_line += (
' ' * (self.left_panel_width - len(left_line_plain)))
line += left_line
else:
line += ' ' * self.left_panel_width
if self.right_panel_width:
line += self.markup.RESET
line += self.markup.WHITE
line += self.RIGHT_PANEL_SEPARATOR
line += self.markup.RESET
right_line = self.__get_right_line(widget_output)
line += right_line
output.append(line)
return self.markup.new_line.join(output) + self.markup.new_line
def add_info_widget(self, widget):
"""
Add widget string to right panel of the screen
"""
index = widget.get_index()
while index in list(self.info_widgets.keys()):
index += 1
        self.info_widgets[index] = widget
def add_second_data(self, data):
"""
Notification method about new aggregator data
"""
self.left_panel.add_second(data)
class AbstractBlock:
"""
Parent class for all left panel blocks
"""
def __init__(self, screen):
self.log = logging.getLogger(__name__)
self.lines = []
self.width = 0
self.screen = screen
def add_second(self, data):
"""
Notification about new aggregate data
"""
pass
def fill_rectangle(self, prepared):
""" Right-pad lines of block to equal width """
result = []
width = max([self.clean_len(line) for line in prepared])
for line in prepared:
spacer = ' ' * (width - self.clean_len(line))
result.append(line + (self.screen.markup.RESET, spacer))
return width, result
def clean_len(self, line):
""" Calculate wisible length of string """
if isinstance(line, str):
return len(self.screen.markup.clean_markup(line))
        elif isinstance(line, (tuple, list)):
markups = self.screen.markup.get_markup_vars()
length = 0
for i in line:
if i not in markups:
length += len(i)
return length
def render(self):
"""
Render method, fills .lines and .width properties with rendered data
"""
raise RuntimeError("Abstract method needs to be overridden")
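# Minimal sketch of the block contract above (hypothetical block, not wired
# into the layout built by Screen): a subclass fills self.lines with tuples of
# markup/text chunks and sets self.width, which fill_rectangle() derives from
# the prepared chunks.
class _DemoCounterBlock(AbstractBlock):
    """ Hypothetical one-line block counting requests over the whole test """
    def __init__(self, screen):
        AbstractBlock.__init__(self, screen)
        self.total = 0
    def add_second(self, data):
        # accumulate the per-second request count from the aggregator payload
        self.total += data["overall"]["interval_real"]["len"]
    def render(self, expected_width=None):
        prepared = [(self.screen.markup.WHITE, 'Total requests: %d' % self.total)]
        self.width, self.lines = self.fill_rectangle(prepared)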
class HorizontalBlock(AbstractBlock):
"""
    Block to merge two other blocks horizontally
"""
def __init__(self, left_block, right_block, screen):
AbstractBlock.__init__(self, screen)
self.left = left_block
self.right = right_block
self.separator = ' . '
def render(self, expected_width=None):
if not expected_width:
expected_width = self.screen.left_panel_width
def get_line(source, num):
if num >= len(source.lines):
return (' ' * source.width,)
else:
                source_line = source.lines[num]
spacer = ' ' * (source.width - self.clean_len(source_line))
return source_line + (spacer,)
self.left.render(expected_width=expected_width)
right_width_limit = expected_width - self.left.width - len(self.separator)
self.right.render(expected_width=right_width_limit)
self.height = max(len(self.left.lines), len(self.right.lines))
self.width = self.left.width + self.right.width + len(self.separator)
self.lines = []
for n in range(self.height):
self.lines.append(
get_line(self.left, n) + (self.separator,) + get_line(self.right, n)
)
self.lines.append((' ' * self.width,))
def add_second(self, data):
self.left.add_second(data)
self.right.add_second(data)
class VerticalBlock(AbstractBlock):
"""
Block to merge two other blocks vertically
"""
def __init__(self, top_block, bottom_block, screen):
AbstractBlock.__init__(self, screen)
self.top = top_block
self.bottom = bottom_block
def render(self, expected_width=None):
if not expected_width:
expected_width = self.screen.left_panel_width
self.top.render(expected_width=expected_width)
self.bottom.render(expected_width=expected_width)
self.width = max(self.top.width, self.bottom.width)
self.lines = []
for line in self.top.lines:
spacer = ' ' * (self.width - self.top.width)
self.lines.append(line + (spacer,))
if self.top.lines and self.bottom.lines:
spacer = ' ' * self.width
self.lines.append((spacer,))
for line in self.bottom.lines:
spacer = ' ' * (self.width - self.bottom.width)
self.lines.append(line + (spacer,))
def add_second(self, data):
self.top.add_second(data)
self.bottom.add_second(data)
class RPSBlock(AbstractBlock):
""" Actual RPS sparkline """
def __init__(self, screen):
AbstractBlock.__init__(self, screen)
self.begin_tpl = 'Data delay: {delay}s, RPS: {rps:>3,} '
self.sparkline = Sparkline(180)
self.last_count = 0
self.last_second = None
def add_second(self, data):
count = data['overall']['interval_real']['len']
self.last_count = count
ts = data['ts']
self.last_second = ts
self.sparkline.add(ts, 'rps', count)
def render(self, expected_width=None):
if self.last_second:
delay = int(time.time() - self.last_second)
else:
delay = ' - '
line_start = self.begin_tpl.format(rps=self.last_count, delay=delay)
spark_len = expected_width - len(line_start) - 2
spark = self.sparkline.get_sparkline('rps', spark_len=spark_len)
prepared = [(self.screen.markup.WHITE, line_start, ' ') + tuple(spark) + (self.screen.markup.RESET,)]
self.width, self.lines = self.fill_rectangle(prepared)
class PercentilesBlock(AbstractBlock):
""" Aggregated percentiles """
def __init__(self, screen):
AbstractBlock.__init__(self, screen)
self.title = 'Percentiles (all/last 1m/last), ms:'
self.overall = None
self.width = 10
self.last_ts = None
self.last_min = {}
self.quantiles = [10, 20, 30, 40, 50, 60, 70, 75, 80, 85, 90, 95, 99, 99.5, 100]
template = {
'quantile': {'tpl': '{:>.1f}%'},
'all': {'tpl': '{:>,.1f}'}, # noqa: E241
'last_1m': {'tpl': '{:>,.1f}'}, # noqa: E241
'last': {'tpl': '{:>,.1f}'} # noqa: E241
}
delimiters = [' < ', ' ']
self.formatter = TableFormatter(template, delimiters)
def add_second(self, data):
incoming_hist = data['overall']['interval_real']['hist']
ts = data['ts']
self.precise_quantiles = {
q: float(v) / 1000
for q, v in zip(
data["overall"]["interval_real"]["q"]["q"],
data["overall"]["interval_real"]["q"]["value"])
}
dist = pd.Series(incoming_hist['data'], index=incoming_hist['bins'])
if self.overall is None:
self.overall = dist
else:
self.overall = self.overall.add(dist, fill_value=0)
for second in list(self.last_min.keys()):
if ts - second > 60:
self.last_min.pop(second)
self.last_min[ts] = dist
def __calc_percentiles(self):
def hist_to_quant(histogram, quant):
cumulative = histogram.cumsum()
total = cumulative.max()
positions = cumulative.searchsorted([float(i) / 100 * total for i in quant])
quant_times = [cumulative.index[i] / 1000. for i in positions]
return quant_times
all_times = hist_to_quant(self.overall, self.quantiles)
last_data = self.last_min[max(self.last_min.keys())]
last_times = hist_to_quant(last_data, self.quantiles)
# Check if we have precise data for last second quantiles instead of binned histogram
for position, q in enumerate(self.quantiles):
if q in self.precise_quantiles:
last_times[position] = self.precise_quantiles[q]
# Replace binned values with precise, if lower quantile bin happens to be
# greater than upper quantile precise values
for position in reversed(list(range(1, len(last_times)))):
if last_times[position - 1] > last_times[position]:
last_times[position - 1] = last_times[position]
last_1m = pd.Series()
for ts, data in self.last_min.items():
if last_1m.empty:
last_1m = data
else:
last_1m = last_1m.add(data, fill_value=0)
last_1m_times = hist_to_quant(last_1m, self.quantiles)
quant_times = reversed(
list(zip(self.quantiles, all_times, last_1m_times, last_times))
)
data = []
for q, all_time, last_1m, last_time in quant_times:
data.append({
'quantile': q,
'all': all_time,
'last_1m': last_1m,
'last': last_time
})
return data
def render(self, expected_width=None):
prepared = [(self.screen.markup.WHITE, self.title)]
if self.overall is None:
prepared.append(('',))
else:
data = self.__calc_percentiles()
prepared += self.formatter.render_table(data, ['quantile', 'all', 'last_1m', 'last'])
self.width, self.lines = self.fill_rectangle(prepared)
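# Standalone sketch of the hist_to_quant() idea above (hypothetical bins and
# counts; bin edges in microseconds, output in milliseconds): cumulative counts
# are searched for each requested quantile position.
def _demo_hist_to_quant():
    """ Minimal histogram-to-quantile conversion example """
    hist = pd.Series([5, 20, 50, 20, 5], index=[1000, 2000, 5000, 10000, 20000])
    cumulative = hist.cumsum()          # [5, 25, 75, 95, 100]
    total = cumulative.max()
    quantiles = [50, 95, 100]
    positions = cumulative.searchsorted([q / 100.0 * total for q in quantiles])
    return [cumulative.index[i] / 1000.0 for i in positions]   # -> [5.0, 10.0, 20.0]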
class CurrentHTTPBlock(AbstractBlock):
""" Http codes with highlight"""
def __init__(self, screen):
AbstractBlock.__init__(self, screen)
self.overall_dist = defaultdict(int)
self.title = 'HTTP codes: '
self.total_count = 0
template = {
'count': {'tpl': '{:>,}'}, # noqa: E241
'last': {'tpl': '+{:>,}'}, # noqa: E241
'percent': {'tpl': '{:>.2f}%'}, # noqa: E241
'code': {'tpl': '{:>3}', 'final': '{{{:}:<{len}}}'}, # noqa: E241
'description': {'tpl': '{:<10}', 'final': '{{{:}:<{len}}}'}
}
delimiters = [' ', ' ', ' : ', ' ']
self.formatter = TableFormatter(template, delimiters)
def add_second(self, data):
self.last_dist = data["overall"]["proto_code"]["count"]
for code, count in self.last_dist.items():
self.total_count += count
self.overall_dist[code] += count
def __code_color(self, code):
colors = {(200, 299): self.screen.markup.GREEN,
(300, 399): self.screen.markup.CYAN,
(400, 499): self.screen.markup.YELLOW,
(500, 599): self.screen.markup.RED}
if code in list(self.last_dist.keys()):
for left, right in colors:
if left <= int(code) <= right:
return colors[(left, right)]
return self.screen.markup.MAGENTA
else:
return ''
def __code_descr(self, code):
if int(code) in util.HTTP:
return util.HTTP[int(code)]
else:
return 'N/A'
def render(self, expected_width=None):
prepared = [(self.screen.markup.WHITE, self.title)]
if not self.overall_dist:
prepared.append(('',))
else:
data = []
for code, count in sorted(self.overall_dist.items()):
if code in self.last_dist:
last_count = self.last_dist[code]
else:
last_count = 0
data.append({
'count': count,
'last': last_count,
'percent': 100 * safe_div(count, self.total_count),
'code': code,
'description': self.__code_descr(code)
})
table = self.formatter.render_table(data, ['count', 'last', 'percent', 'code', 'description'])
for num, line in enumerate(data):
color = self.__code_color(line['code'])
prepared.append((color, table[num][0]))
self.width, self.lines = self.fill_rectangle(prepared)
class CurrentNetBlock(AbstractBlock):
""" NET codes with highlight"""
def __init__(self, screen):
AbstractBlock.__init__(self, screen)
self.overall_dist = defaultdict(int)
self.title = 'Net codes:'
self.total_count = 0
template = {
'count': {'tpl': '{:>,}'}, # noqa: E241
'last': {'tpl': '+{:>,}'}, # noqa: E241
'percent': {'tpl': '{:>.2f}%'}, # noqa: E241
'code': {'tpl': '{:>2}', 'final': '{{{:}:<{len}}}'}, # noqa: E241
'description': {'tpl': '{:<10}', 'final': '{{{:}:<{len}}}'}
}
delimiters = [' ', ' ', ' : ', ' ']
self.formatter = TableFormatter(template, delimiters)
def add_second(self, data):
self.last_dist = data["overall"]["net_code"]["count"]
for code, count in self.last_dist.items():
self.total_count += count
self.overall_dist[code] += count
def __code_descr(self, code):
if int(code) in util.NET:
return util.NET[int(code)]
else:
return 'N/A'
def __code_color(self, code):
if code in list(self.last_dist.keys()):
if int(code) == 0:
return self.screen.markup.GREEN
elif int(code) == 314:
return self.screen.markup.MAGENTA
else:
return self.screen.markup.RED
else:
return ''
def render(self, expected_width=None):
prepared = [(self.screen.markup.WHITE, self.title)]
if not self.overall_dist:
prepared.append(('',))
else:
data = []
for code, count in sorted(self.overall_dist.items()):
if code in self.last_dist:
last_count = self.last_dist[code]
else:
last_count = 0
data.append({
'count': count,
'last': last_count,
'percent': 100 * safe_div(count, self.total_count),
'code': code,
'description': self.__code_descr(code)
})
table = self.formatter.render_table(data, ['count', 'last', 'percent', 'code', 'description'])
for num, line in enumerate(data):
color = self.__code_color(line['code'])
prepared.append((color, table[num][0]))
self.width, self.lines = self.fill_rectangle(prepared)
class AnswSizesBlock(AbstractBlock):
""" Answer and response sizes, if available """
def __init__(self, screen, sizes_max_spark=120):
AbstractBlock.__init__(self, screen)
self.sparkline = Sparkline(sizes_max_spark)
self.overall = {'count': 0, 'Response': 0, 'Request': 0}
self.last = {'count': 0, 'Response': 0, 'Request': 0}
self.title = 'Average Sizes (all/last), bytes:'
template = {
'name': {'tpl': '{:>}'},
'avg': {'tpl': '{:>,.1f}'},
'last_avg': {'tpl': '{:>,.1f}'}
}
delimiters = [': ', ' / ']
self.formatter = TableFormatter(template, delimiters)
def add_second(self, data):
self.last['count'] = data["overall"]["interval_real"]["len"]
self.last['Request'] = data["overall"]["size_out"]["total"]
self.last['Response'] = data["overall"]["size_in"]["total"]
self.overall['count'] += self.last['count']
self.overall['Request'] += self.last['Request']
self.overall['Response'] += self.last['Response']
ts = data['ts']
for direction in ['Request', 'Response']:
self.sparkline.add(ts, direction, self.last[direction] / self.last['count'])
def render(self, expected_width=None):
prepared = [(self.screen.markup.WHITE, self.title)]
if self.overall['count']:
overall_avg = avg_from_dict(self.overall)
last_avg = avg_from_dict(self.last)
data = []
for direction in ['Request', 'Response']:
data.append({
'name': direction,
'avg': overall_avg[direction],
'last_avg': last_avg[direction]
})
table = self.formatter.render_table(data, ['name', 'avg', 'last_avg'])
for num, direction in enumerate(['Request', 'Response']):
spark_len = expected_width - self.clean_len(table[0]) - 3
spark = self.sparkline.get_sparkline(direction, spark_len=spark_len)
prepared.append(table[num] + (' ',) + tuple(spark))
else:
            prepared.append(('',))
            prepared.append(('',))
self.width, self.lines = self.fill_rectangle(prepared)
class AvgTimesBlock(AbstractBlock):
""" Average times breakdown """
def __init__(self, screen, times_max_spark=120):
AbstractBlock.__init__(self, screen)
self.sparkline = Sparkline(times_max_spark)
self.fraction_keys = [
'interval_real', 'connect_time', 'send_time', 'latency', 'receive_time']
self.fraction_names = {
'interval_real': 'Overall',
'connect_time': 'Connect', # noqa: E241
'send_time': 'Send', # noqa: E241
'latency': 'Latency', # noqa: E241
'receive_time': 'Receive'} # noqa: E241
self.overall = dict([(k, 0) for k in self.fraction_keys])
self.overall['count'] = 0
self.last = dict([(k, 0) for k in self.fraction_keys])
self.last['count'] = 0
self.title = 'Average Times (all/last), ms:'
template = {
'name': {'tpl': '{:>}'},
'avg': {'tpl': '{:>,.2f}'},
'last_avg': {'tpl': '{:>,.2f}'}
}
delimiters = [': ', ' / ']
self.formatter = TableFormatter(template, delimiters)
def add_second(self, data):
self.last = {}
self.last['count'] = data["overall"]["interval_real"]["len"]
self.overall['count'] += self.last['count']
ts = data["ts"]
for fraction in self.fraction_keys:
self.last[fraction] = float(data["overall"][fraction]["total"]) / 1000
self.overall[fraction] += self.last[fraction]
self.sparkline.add(ts, fraction, self.last[fraction] / self.last['count'])
def render(self, expected_width=None):
prepared = [(self.screen.markup.WHITE, self.title)]
if self.overall['count']:
overall_avg = avg_from_dict(self.overall)
last_avg = avg_from_dict(self.last)
data = []
for fraction in self.fraction_keys:
data.append({
'name': self.fraction_names[fraction],
'avg': overall_avg[fraction],
'last_avg': last_avg[fraction]
})
table = self.formatter.render_table(data, ['name', 'avg', 'last_avg'])
for num, fraction in enumerate(self.fraction_keys):
spark_len = expected_width - self.clean_len(table[0]) - 3
spark = self.sparkline.get_sparkline(fraction, spark_len=spark_len)
prepared.append(table[num] + (' ',) + tuple(spark))
else:
for fraction in self.fraction_keys:
prepared.append(('-',))
self.width, self.lines = self.fill_rectangle(prepared)
class CasesBlock(AbstractBlock):
""" Cases info """
def __init__(self, screen, cases_sort_by='http_err', cases_max_spark=60, reorder_delay=5, max_case_len=32):
AbstractBlock.__init__(self, screen)
self.cumulative_cases = {}
self.last_cases = {}
self.title = 'Cumulative Cases Info:'
self.max_case_len = max_case_len
self.cases_order = []
self.reorder_delay = reorder_delay
self.sparkline = Sparkline(cases_max_spark)
self.cases_sort_by = cases_sort_by
self.last_reordered = time.time()
self.field_order = ['name', 'count', 'percent', 'last', 'net_err', 'http_err', 'avg', 'last_avg']
template = {
'name': {'tpl': '{:>}:', 'header': 'name', 'final': '{{{:}:>{len}}}'}, # noqa: E241
'count': {'tpl': '{:>,}', 'header': 'count'}, # noqa: E241
'last': {'tpl': '+{:>,}', 'header': 'last'}, # noqa: E241
'percent': {'tpl': '{:>.2f}%', 'header': '%'}, # noqa: E241
'net_err': {'tpl': '{:>,}', 'header': 'net_e'}, # noqa: E241
'http_err': {'tpl': '{:>,}', 'header': 'http_e'}, # noqa: E241
'avg': {'tpl': '{:>,.1f}', 'header': 'avg ms'}, # noqa: E241
'last_avg': {'tpl': '{:>,.1f}', 'header': 'last ms'}
}
delimiters = [' ']
self.formatter = TableFormatter(template, delimiters)
def add_second(self, data):
def prepare_data(tag_data, display_name):
count = tag_data["interval_real"]["len"]
time = tag_data["interval_real"]["total"] / 1000
net_err, http_err, spark_color = combine_codes(tag_data, self.screen.markup)
if spark_color == self.screen.markup.GREEN:
text_color = self.screen.markup.WHITE
else:
text_color = spark_color
return (spark_color, {
'count': count, 'net_err': net_err, 'http_err': http_err,
'time': time, 'color': text_color, 'display_name': display_name})
ts = data["ts"]
overall = data["overall"]
self.last_cases = {}
spark_color, self.last_cases[0] = prepare_data(overall, 'OVERALL')
self.sparkline.add(ts, 0, self.last_cases[0]['count'], color=spark_color)
tagged = data["tagged"]
for tag_name, tag_data in tagged.items():
            # tag names are already unicode in Python 3, so cyrillic case names are supported as-is
name = tag_name
spark_color, self.last_cases[name] = prepare_data(tag_data, name)
self.sparkline.add(ts, name, self.last_cases[name]['count'], color=spark_color)
for name in self.last_cases:
if name not in self.cumulative_cases:
self.cumulative_cases[name] = {}
for k in ['count', 'net_err', 'http_err', 'time', 'display_name']:
self.cumulative_cases[name][k] = self.last_cases[name][k]
else:
for k in ['count', 'net_err', 'http_err', 'time']:
self.cumulative_cases[name][k] += self.last_cases[name][k]
def __cut_name(self, name):
if len(name) > self.max_case_len:
return name[:self.max_case_len - 1] + '\u2026'
else:
return name
def __reorder_cases(self):
sorted_cases = sorted(self.cumulative_cases.items(),
key=lambda k_v: -1 * k_v[1][self.cases_sort_by])
new_order = [case for (case, data) in sorted_cases]
now = time.time()
if now - self.reorder_delay > self.last_reordered:
self.cases_order = new_order
self.last_reordered = now
else:
if len(new_order) > len(self.cases_order):
for case in new_order:
if case not in self.cases_order:
self.cases_order.append(case)
def render(self, expected_width=None):
prepared = [(self.screen.markup.WHITE, self.title)]
if 0 in self.cumulative_cases: # 0 used as special name for OVERALL to avoid name collision
total_count = self.cumulative_cases[0]['count']
self.__reorder_cases()
data = []
for name in self.cases_order:
case_data = self.cumulative_cases[name]
if name in self.last_cases:
last = self.last_cases[name]
else:
last = {'count': 0, 'net_err': 0, 'http_err': 0, 'time': 0, 'color': '', 'display_name': name}
data.append({
'full_name': name,
'name': self.__cut_name(case_data['display_name']),
'count': case_data['count'],
'percent': 100 * safe_div(case_data['count'], total_count),
'last': last['count'],
'net_err': case_data['net_err'],
'http_err': case_data['http_err'],
'avg': safe_div(case_data['time'], case_data['count']),
'last_avg': safe_div(last['time'], last['count'])
})
table = self.formatter.render_table(data, self.field_order)
prepared.append(table[0]) # first line is table header
for num, line in enumerate(data):
full_name = line['full_name']
if full_name in self.last_cases:
color = self.last_cases[full_name]['color']
else:
color = ''
spark_len = expected_width - self.clean_len(table[0]) - 3
spark = self.sparkline.get_sparkline(full_name, spark_len=spark_len)
prepared.append((color,) + table[num + 1] + (' ',) + tuple(spark))
for _ in range(3 - len(self.cumulative_cases)):
prepared.append(('',))
self.width, self.lines = self.fill_rectangle(prepared)
| lgpl-2.1 |
DonBeo/scikit-learn | examples/exercises/plot_cv_digits.py | 232 | 1206 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
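# A minimal follow-up sketch (uses only the arrays computed above): report
# which C gave the best mean cross-validated score.
best_idx = np.argmax(scores)
print("Best C: %g (CV score %.3f +/- %.3f)"
      % (C_s[best_idx], scores[best_idx], scores_std[best_idx]))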
| bsd-3-clause |
zmlabe/IceVarFigs | Scripts/SeaIce/plot_sit_PIOMAS_monthly_v2.py | 1 | 8177 | """
Author : Zachary M. Labe
Date : 23 August 2016
"""
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
import datetime
import calendar as cal
import matplotlib.colors as c
import cmocean
### Define constants
### Directory and time
directoryfigure = './Figures/'
directorydata = './Data/'
now = datetime.datetime.now()
month = now.month
years = np.arange(1979,2019,1)
months = np.arange(1,13,1)
def readPiomas(directory,vari,years,thresh):
"""
Reads binary PIOMAS data
"""
### Retrieve Grid
grid = np.genfromtxt(directory + 'grid.txt')
grid = np.reshape(grid,(grid.size))
### Define Lat/Lon
lon = grid[:grid.size//2]
lons = np.reshape(lon,(120,360))
lat = grid[grid.size//2:]
lats = np.reshape(lat,(120,360))
### Call variables from PIOMAS
if vari == 'thick':
files = 'heff'
directory = directory + 'Thickness/'
elif vari == 'sic':
files = 'area'
directory = directory + 'SeaIceConcentration/'
elif vari == 'snow':
files = 'snow'
directory = directory + 'SnowCover/'
elif vari == 'oflux':
files = 'oflux'
directory = directory + 'OceanFlux/'
### Read data from binary into numpy arrays
var = np.empty((len(years),12,120,360))
for i in range(len(years)):
data = np.fromfile(directory + files + '_%s.H' % (years[i]),
dtype = 'float32')
### Reshape into [year,month,lat,lon]
months = int(data.shape[0]/(120*360))
if months != 12:
lastyearq = np.reshape(data,(months,120,360))
emptymo = np.empty((12-months,120,360))
emptymo[:,:,:] = np.nan
lastyear = np.append(lastyearq,emptymo,axis=0)
var[i,:,:,:] = lastyear
else:
dataq = np.reshape(data,(12,120,360))
var[i,:,:,:] = dataq
### Mask out threshold values
var[np.where(var <= thresh)] = np.nan
print('Completed: Read "%s" data!' % (vari))
return lats,lons,var
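### Standalone sketch (synthetic array, no binary file needed) of the
### partial-year handling above: a flat record of N months on the 120x360 grid
### is padded with NaN months up to a full (12,120,360) year
def demoPartialYearReshape(months_available=7):
    data = np.arange(months_available*120*360,dtype='float32')
    months = int(data.shape[0]/(120*360))
    lastyearq = np.reshape(data,(months,120,360))
    emptymo = np.empty((12-months,120,360))
    emptymo[:,:,:] = np.nan
    lastyear = np.append(lastyearq,emptymo,axis=0)
    return lastyear.shape # -> (12, 120, 360)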
lats,lons,sit = readPiomas(directorydata,'thick',years,0.1)
### Read SIV data
years2,aug = np.genfromtxt(directorydata + 'monthly_piomas.txt',
unpack=True,delimiter='',usecols=[0,2])
climyr = np.where((years2 >= 1981) & (years2 <= 2010))[0]
clim = np.nanmean(aug[climyr])
### Select month (index 1 = February)
sit = sit[:,1,:,:]
### Adjust axes in time series plots
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='k')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
### Plot sea ice thickness maps
style = 'polar'
### Define figure
if style == 'ortho':
m = Basemap(projection='ortho',lon_0=-90,
lat_0=70,resolution='l',round=True)
elif style == 'polar':
m = Basemap(projection='npstere',boundinglat=67,lon_0=270,resolution='l',round =True)
for i in range(aug.shape[0]):
fig = plt.figure()
ax = plt.subplot(111)
for txt in fig.texts:
txt.set_visible(False)
var = sit[i,:,:]
m.drawmapboundary(fill_color='k')
m.drawlsmask(land_color='k',ocean_color='k')
m.drawcoastlines(color='aqua',linewidth=0.7)
# Make the plot continuous
barlim = np.arange(0,6,1)
cmap = cmocean.cm.thermal
cs = m.contourf(lons,lats,var,
np.arange(0,5.1,0.25),extend='max',
alpha=1,latlon=True,cmap=cmap)
if i >= 39:
ccc = 'slateblue'
else:
ccc = 'w'
t1 = plt.annotate(r'\textbf{%s}' % years[i],textcoords='axes fraction',
xy=(0,0), xytext=(-0.54,0.815),
fontsize=50,color=ccc)
t2 = plt.annotate(r'\textbf{GRAPHIC}: Zachary Labe (@ZLabe)',
textcoords='axes fraction',
xy=(0,0), xytext=(1.02,-0.0),
fontsize=4.5,color='darkgrey',rotation=90,va='bottom')
t3 = plt.annotate(r'\textbf{SOURCE}: http://psc.apl.washington.edu/zhang/IDAO/data.html',
textcoords='axes fraction',
xy=(0,0), xytext=(1.05,-0.0),
fontsize=4.5,color='darkgrey',rotation=90,va='bottom')
t4 = plt.annotate(r'\textbf{DATA}: PIOMAS v2.1 (Zhang and Rothrock, 2003) (\textbf{February})',
textcoords='axes fraction',
xy=(0,0), xytext=(1.08,-0.0),
fontsize=4.5,color='darkgrey',rotation=90,va='bottom')
cbar = m.colorbar(cs,drawedges=True,location='bottom',pad = 0.14,size=0.07)
ticks = np.arange(0,8,1)
cbar.set_ticks(barlim)
cbar.set_ticklabels(list(map(str,barlim)))
cbar.set_label(r'\textbf{SEA ICE THICKNESS [m]}',fontsize=10,
color='darkgrey')
cbar.ax.tick_params(axis='x', size=.0001)
cbar.ax.tick_params(labelsize=7)
###########################################################################
###########################################################################
### Create subplot
    a = plt.axes([.2, .225, .08, .4], facecolor='k')
N = 1
ind = np.linspace(N,0.2,1)
width = .33
meansiv = np.nanmean(aug)
rects = plt.bar(ind,[aug[i]],width,zorder=2)
# plt.plot(([meansiv]*2),zorder=1)
rects[0].set_color('aqua')
if i == 39:
rects[0].set_color('slateblue')
adjust_spines(a, ['left', 'bottom'])
a.spines['top'].set_color('none')
a.spines['right'].set_color('none')
a.spines['left'].set_color('none')
a.spines['bottom'].set_color('none')
plt.setp(a.get_xticklines()[0:-1],visible=False)
a.tick_params(labelbottom='off')
a.tick_params(labelleft='off')
a.tick_params('both',length=0,width=0,which='major')
plt.yticks(np.arange(0,31,5),map(str,np.arange(0,31,5)))
plt.xlabel(r'\textbf{SEA ICE VOLUME [km$^{3}$]}',
fontsize=10,color='darkgrey',labelpad=1)
for rectq in rects:
height = rectq.get_height()
cc = 'darkgrey'
if i == 39:
cc ='slateblue'
plt.text(rectq.get_x() + rectq.get_width()/2.0,
height+1, r'\textbf{%s}' % format(int(height*1000),",d"),
ha='center', va='bottom',color=cc,fontsize=20)
fig.subplots_adjust(right=1.1)
###########################################################################
###########################################################################
    if i < 10:
        plt.savefig(directoryfigure + 'icy_0%s.png' % i,dpi=300)
    else:
        plt.savefig(directoryfigure + 'icy_%s.png' % i,dpi=300)
    if i == 39:
        # repeat the final frame so the resulting animation pauses on the last year
        for q in range(39,52):
            plt.savefig(directoryfigure + 'icy_%s.png' % q,dpi=300)
t1.remove()
t2.remove()
t3.remove()
t4.remove()
| mit |
lenovor/scikit-learn | sklearn/datasets/samples_generator.py | 45 | 56433 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
        The fraction of samples whose class is randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
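# Minimal usage sketch (illustrative parameter choices, not part of the public
# examples): a small balanced 3-class problem with a few informative features.
def _demo_make_classification():
    X, y = make_classification(n_samples=300, n_features=10, n_informative=4,
                               n_redundant=2, n_classes=3,
                               n_clusters_per_class=1, flip_y=0.0,
                               random_state=0)
    return X.shape, np.bincount(y)      # -> ((300, 10), array([100, 100, 100]))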
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
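# Minimal usage sketch (illustrative): targets are +/- 1 depending on whether
# the squared norm of the ten features exceeds 9.34.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=1000, random_state=0)
    return X.shape, np.unique(y)        # -> ((1000, 10), array([-1.,  1.]))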
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
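# Minimal usage sketch (illustrative parameter choices): with coef=True the
# sparse ground-truth weights are returned next to the generated data.
def _demo_make_regression():
    X, y, coef = make_regression(n_samples=200, n_features=20, n_informative=5,
                                 noise=1.0, coef=True, random_state=0)
    # only the n_informative entries of coef are non-zero
    return X.shape, y.shape, int(np.sum(coef != 0))     # -> ((200, 20), (200,), 5)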
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
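# Minimal usage sketch (illustrative noise levels): both toy datasets return
# 2-D points with balanced binary labels.
def _demo_toy_2d_datasets():
    X_circ, y_circ = make_circles(n_samples=100, noise=0.05, factor=0.5,
                                  random_state=0)
    X_moon, y_moon = make_moons(n_samples=100, noise=0.05, random_state=0)
    return (X_circ.shape, np.bincount(y_circ),      # -> ((100, 2), array([50, 50]))
            X_moon.shape, np.bincount(y_moon))      # -> ((100, 2), array([50, 50]))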
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
    # Random (orthonormal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
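# A minimal usage sketch of make_low_rank_matrix, assuming np and linalg are
# importable as they are used elsewhere in this module; the helper name and
# sizes are illustrative only.
def _example_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=100, n_features=50, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = linalg.svd(X, compute_uv=False)
    # Most of the spectral energy should sit in the first ~effective_rank
    # singular values, with a fat but small tail after that.
    explained = np.sum(s[:5] ** 2) / np.sum(s ** 2)
    return explained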
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
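# A minimal usage sketch of make_sparse_coded_signal, assuming np is available
# as used above; the helper name and sizes are illustrative only.
def _example_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=20, n_components=15,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert Y.shape == (10, 20) and D.shape == (10, 15) and X.shape == (15, 20)
    # Each column of the code carries exactly n_nonzero_coefs active entries.
    assert np.all((X != 0).sum(axis=0) == 3)
    return Y, D, X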
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
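# A minimal usage sketch of make_sparse_uncorrelated, assuming np is available
# as used above; the helper name is illustrative only.
def _example_make_sparse_uncorrelated():
    X, y = make_sparse_uncorrelated(n_samples=500, n_features=10,
                                    random_state=0)
    # Only the first four columns are informative; subtracting the conditional
    # mean should leave roughly unit-variance residual noise.
    resid = y - (X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3])
    return resid.std()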
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
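# A minimal usage sketch of make_spd_matrix, assuming np and linalg are
# importable as used above; the helper name is illustrative only.
def _example_make_spd_matrix():
    X = make_spd_matrix(n_dim=4, random_state=0)
    # The result should be symmetric with strictly positive eigenvalues.
    assert np.allclose(X, X.T)
    eigvals = linalg.eigvalsh(X)
    assert np.all(eigvals > 0)
    return eigvals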
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
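# A minimal usage sketch of make_sparse_spd_matrix, assuming np and linalg are
# importable as used above; the helper name is illustrative only.
def _example_make_sparse_spd_matrix():
    prec = make_sparse_spd_matrix(dim=10, alpha=0.95, random_state=0)
    assert np.allclose(prec, prec.T)
    # A Cholesky factorization succeeds only for positive-definite matrices.
    linalg.cholesky(prec)
    return prec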
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
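# A minimal usage sketch of make_swiss_roll, assuming np is available as used
# above; the helper name and sizes are illustrative only.
def _example_make_swiss_roll():
    X, t = make_swiss_roll(n_samples=300, noise=0.05, random_state=0)
    # t parameterizes position along the roll; without noise the first and
    # third coordinates are t*cos(t) and t*sin(t).
    assert X.shape == (300, 3) and t.shape == (300,)
    return X, t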
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
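# A minimal usage sketch of make_gaussian_quantiles, assuming np is available
# as used above; the helper name and sizes are illustrative only.
def _example_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=300, n_features=2, n_classes=3,
                                   random_state=0)
    # The three classes are concentric shells with (roughly) equal counts.
    counts = np.bincount(y)
    assert counts.sum() == 300 and len(counts) == 3
    return counts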
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
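# A minimal usage sketch of make_biclusters, assuming np is available as used
# above; the helper name and sizes are illustrative only.
def _example_make_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3, noise=1.0,
                                       shuffle=False, random_state=0)
    # One boolean row mask and one boolean column mask per bicluster.
    assert data.shape == (30, 20)
    assert rows.shape == (3, 30) and cols.shape == (3, 20)
    return data, rows, cols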
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
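# A minimal usage sketch of make_checkerboard, assuming np is available as
# used above; the helper name and sizes are illustrative only.
def _example_make_checkerboard():
    data, rows, cols = make_checkerboard(shape=(40, 30), n_clusters=(2, 3),
                                         noise=0.5, shuffle=False,
                                         random_state=0)
    # 2 row clusters x 3 column clusters give 6 biclusters in total.
    assert data.shape == (40, 30)
    assert rows.shape == (6, 40) and cols.shape == (6, 30)
    return data, rows, cols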
| bsd-3-clause |
lail3344/sms-tools | lectures/04-STFT/plots-code/windows-2.py | 24 | 1026 | import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DF
import utilFunctions as UF
import math
(fs, x) = UF.wavread('../../../sounds/violin-B3.wav')
N = 1024
pin = 5000
w = np.ones(801)
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
x1 = x[pin-hM1:pin+hM2]
plt.figure(1, figsize=(9.5, 5))
plt.subplot(3,1,1)
plt.plot(np.arange(-hM1, hM2), x1, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.title('x (violin-B3.wav)')
mX, pX = DF.dftAnal(x1, w, N)
mX = mX - max(mX)
plt.subplot(3,1,2)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,N/4,-70,0])
plt.title ('mX (rectangular window)')
w = np.blackman(801)
mX, pX = DF.dftAnal(x1, w, N)
mX = mX - max(mX)
plt.subplot(3,1,3)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,N/4,-70,0])
plt.title ('mX (blackman window)')
plt.tight_layout()
plt.savefig('windows-2.png')
plt.show()
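# A small sketch of the idea the two spectra above illustrate: a rectangular
# window has a narrower main lobe than a Blackman window of the same length.
# The helper name below is hypothetical and assumes only numpy (np) from the
# imports above; it is not called by the script.
def _mainlobe_width_bins(window, fft_size=4096):
    spectrum = np.abs(np.fft.rfft(window, fft_size))
    spectrum /= spectrum.max()
    # Walk away from the DC peak until the magnitude first stops decreasing,
    # i.e. until the first spectral null is reached.
    k = 1
    while k < len(spectrum) - 1 and spectrum[k + 1] < spectrum[k]:
        k += 1
    # Express the full main-lobe width in bins of a window-length DFT
    # (roughly 2 bins for rectangular, 6 bins for Blackman).
    return 2.0 * k * len(window) / fft_size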
| agpl-3.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/linear_model/__init__.py | 34 | 3161 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .huber import HuberRegressor
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
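# A minimal usage sketch of one of the estimators re-exported above; the
# helper name and toy data are illustrative only, and numpy is assumed to be
# installed.
def _example_linear_regression():
    import numpy as np
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([1.0, 3.0, 5.0, 7.0])
    model = LinearRegression().fit(X, y)
    # coef_ should be close to [2.0] and intercept_ close to 1.0.
    return model.coef_, model.intercept_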
| mit |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py | 69 | 77521 | import sys
import matplotlib
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.figure import Figure, figaspect
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.image import imread as _imread
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec in plotfile
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap
# We may not need the following imports here:
from matplotlib.colors import Normalize, normalize # latter for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
""" If rcParams['backend_fallback'] is true, check to see if the
current backend is compatible with the current running event
loop, and if not switches to a compatible one.
"""
backend = rcParams['backend']
if not rcParams['backend_fallback'] or \
backend not in _interactive_bk:
return
is_agg_backend = rcParams['backend'].endswith('Agg')
if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
import wx
if wx.App.IsMainLoopRunning():
rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
elif 'qt' in sys.modules and not backend == 'QtAgg':
import qt
if not qt.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qtAgg'
elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
import PyQt4.QtGui
if not PyQt4.QtGui.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt4Agg'
elif 'gtk' in sys.modules and not backend in ('GTK', 'GTKAgg',
'GTKCairo'):
import gobject
if gobject.MainLoop().is_running():
rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
#import Tkinter
pass #what if anything do we need to do for tkinter?
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def findobj(o=None, match=None):
if o is None:
o = gcf()
return o.findobj(match)
findobj.__doc__ = Artist.findobj.__doc__
def switch_backend(newbackend):
"""
Switch the default backend to newbackend. This feature is
**experimental**, and is only expected to work switching to an
image backend. Eg, if you have a bunch of PostScript scripts that
you want to run from an interactive ipython session, you may want
to switch to the PS backend before running them to avoid having a
bunch of GUI windows popup. If you try to interactively switch
from one GUI backend to another, you will explode.
Calling this command will close all open windows.
"""
close('all')
global new_figure_manager, draw_if_interactive, show
matplotlib.use(newbackend, warn=False)
reload(matplotlib.backends)
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def isinteractive():
"""
Return the interactive status
"""
return matplotlib.is_interactive()
def ioff():
'Turn interactive mode off.'
matplotlib.interactive(False)
def ion():
'Turn interactive mode on.'
matplotlib.interactive(True)
def rc(*args, **kwargs):
matplotlib.rc(*args, **kwargs)
if matplotlib.rc.__doc__ is not None:
rc.__doc__ = dedent(matplotlib.rc.__doc__)
def rcdefaults():
matplotlib.rcdefaults()
draw_if_interactive()
if matplotlib.rcdefaults.__doc__ is not None:
rcdefaults.__doc__ = dedent(matplotlib.rcdefaults.__doc__)
# The current "image" (ScalarMappable) is tracked here on a
# per-pylab-session basis:
def gci():
"""
Get the current :class:`~matplotlib.cm.ScalarMappable` instance
(image or patch collection), or *None* if no images or patch
collections have been defined. The commands
:func:`~matplotlib.pyplot.imshow` and
:func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances.
"""
return gci._current
gci._current = None
def sci(im):
"""
Set the current image (target of colormap commands like
:func:`~matplotlib.pyplot.jet`, :func:`~matplotlib.pyplot.hot` or
:func:`~matplotlib.pyplot.clim`).
"""
gci._current = im
## Any Artist ##
# (getp is simply imported)
def setp(*args, **kwargs):
ret = _setp(*args, **kwargs)
draw_if_interactive()
return ret
if _setp.__doc__ is not None:
setp.__doc__ = _setp.__doc__
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize = None, # defaults to rc figure.figsize
dpi = None, # defaults to rc figure.dpi
facecolor = None, # defaults to rc figure.facecolor
edgecolor = None, # defaults to rc figure.edgecolor
frameon = True,
FigureClass = Figure,
**kwargs
):
"""
call signature::
figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
Create a new figure and return a :class:`matplotlib.figure.Figure`
instance. If *num* = *None*, the figure number will be incremented and
a new figure will be created. The returned figure objects have a
*number* attribute holding this number.
If *num* is an integer, and ``figure(num)`` already exists, make it
active and return the handle to it. If ``figure(num)`` does not exist
it will be created. Numbering starts at 1, matlab style::
figure(1)
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
figsize width x height in inches; defaults to rc figure.figsize
dpi resolution; defaults to rc figure.dpi
facecolor the background color; defaults to rc figure.facecolor
edgecolor the border color; defaults to rc figure.edgecolor
========= =======================================================
rcParams defines the default values, which can be modified in the
matplotlibrc file
*FigureClass* is a :class:`~matplotlib.figure.Figure` or derived
class that will be passed on to :meth:`new_figure_manager` in the
backends which allows you to hook custom Figure classes into the
pylab interface. Additional kwargs will be passed on to your
figure init function.
"""
if figsize is None : figsize = rcParams['figure.figsize']
if dpi is None : dpi = rcParams['figure.dpi']
if facecolor is None : facecolor = rcParams['figure.facecolor']
if edgecolor is None : edgecolor = rcParams['figure.edgecolor']
if num is None:
allnums = [f.num for f in _pylab_helpers.Gcf.get_all_fig_managers()]
if allnums:
num = max(allnums) + 1
else:
num = 1
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
if get_backend().lower() == 'ps': dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
figManager.canvas.figure.number = num
draw_if_interactive()
return figManager.canvas.figure
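# A minimal usage sketch of figure() as defined above; the helper name is
# hypothetical and the figure sizes are illustrative only.
def _example_figure_usage():
    fig1 = figure(1, figsize=(4, 3))   # create (or activate) figure 1
    fig2 = figure(2)                   # create a second, separate figure
    again = figure(1)                  # figure 1 already exists, so it is reused
    assert again is fig1 and fig2 is not fig1
    return fig1, fig2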
def gcf():
"Return a handle to the current figure."
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
def get_current_fig_manager():
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
# note we check for __doc__ is not None since py2exe optimize removes
# the docstrings
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
if FigureCanvasBase.mpl_connect.__doc__ is not None:
connect.__doc__ = dedent(FigureCanvasBase.mpl_connect.__doc__)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
if FigureCanvasBase.mpl_disconnect.__doc__ is not None:
disconnect.__doc__ = dedent(FigureCanvasBase.mpl_disconnect.__doc__)
def close(*args):
"""
Close a figure window
``close()`` by itself closes the current figure
``close(num)`` closes figure number *num*
``close(h)`` where *h* is a :class:`Figure` instance, closes that figure
``close('all')`` closes all the figure windows
"""
if len(args)==0:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None: return
else:
figManager.canvas.mpl_disconnect(figManager._cidgcf)
_pylab_helpers.Gcf.destroy(figManager.num)
elif len(args)==1:
arg = args[0]
if arg=='all':
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
elif isinstance(arg, int):
_pylab_helpers.Gcf.destroy(arg)
elif isinstance(arg, Figure):
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
if manager.canvas.figure==arg:
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
else:
raise TypeError('Unrecognized argument type %s to close'%type(arg))
else:
raise TypeError('close takes 0 or 1 arguments')
def clf():
"""
Clear the current figure
"""
gcf().clf()
draw_if_interactive()
def draw():
'redraw the current figure'
get_current_fig_manager().canvas.draw()
def savefig(*args, **kwargs):
fig = gcf()
return fig.savefig(*args, **kwargs)
if Figure.savefig.__doc__ is not None:
savefig.__doc__ = dedent(Figure.savefig.__doc__)
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
if Figure.ginput.__doc__ is not None:
ginput.__doc__ = dedent(Figure.ginput.__doc__)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
if Figure.waitforbuttonpress.__doc__ is not None:
waitforbuttonpress.__doc__ = dedent(Figure.waitforbuttonpress.__doc__)
# Putting things in figures
def figtext(*args, **kwargs):
ret = gcf().text(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.text.__doc__ is not None:
figtext.__doc__ = dedent(Figure.text.__doc__)
def suptitle(*args, **kwargs):
ret = gcf().suptitle(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.suptitle.__doc__ is not None:
suptitle.__doc__ = dedent(Figure.suptitle.__doc__)
def figimage(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
ret = gcf().figimage(*args, **kwargs)
draw_if_interactive()
gci._current = ret
return ret
if Figure.figimage.__doc__ is not None:
figimage.__doc__ = dedent(Figure.figimage.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
def figlegend(handles, labels, loc, **kwargs):
"""
Place a legend in the figure.
*labels*
a sequence of strings
*handles*
a sequence of :class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances
*loc*
can be a string or an integer specifying the legend
location
A :class:`matplotlib.legend.Legend` instance is returned.
Example::
figlegend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right' )
.. seealso::
:func:`~matplotlib.pyplot.legend`:
For information about the location codes
"""
l = gcf().legend(handles, labels, loc, **kwargs)
draw_if_interactive()
return l
## Figure and Axes hybrid ##
def hold(b=None):
"""
Set the hold state. If *b* is None (default), toggle the
hold state, else set the hold state to boolean value *b*::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
When *hold* is *True*, subsequent plot commands will be added to
the current axes. When *hold* is *False*, the current axes and
figure will be cleared on the next plot command.
"""
fig = gcf()
ax = fig.gca()
fig.hold(b)
ax.hold(b)
    # b=None toggles the hold state, so let's get the current hold
# state; but should pyplot hold toggle the rc setting - me thinks
# not
b = ax.ishold()
rc('axes', hold=b)
def ishold():
"""
Return the hold status of the current axes
"""
return gca().ishold()
def over(func, *args, **kwargs):
"""
over calls::
func(*args, **kwargs)
with ``hold(True)`` and then restores the hold state.
"""
h = ishold()
hold(True)
func(*args, **kwargs)
hold(h)
## Axes ##
def axes(*args, **kwargs):
"""
Add an axes at position rect specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
======= ============ ================================================
    kwarg   Accepts      Description
======= ============ ================================================
axisbg color the axes background color
frameon [True|False] display the frame?
sharex otherax current axes shares xaxis attribute with otherax
sharey otherax current axes shares yaxis attribute with otherax
polar [True|False] use a polar axes?
======= ============ ================================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
"""
nargs = len(args)
if len(args)==0: return subplot(111, **kwargs)
if nargs>1:
raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
draw_if_interactive()
return a
def delaxes(*args):
"""
``delaxes(ax)``: remove *ax* from the current figure. If *ax*
doesn't exist, an error will be raised.
``delaxes()``: delete the current axes
"""
if not len(args):
ax = gca()
else:
ax = args[0]
ret = gcf().delaxes(ax)
draw_if_interactive()
return ret
def gca(**kwargs):
"""
Return the current axis instance. This can be used to control
axis properties either using set or the
:class:`~matplotlib.axes.Axes` methods, for example, setting the
xaxis range::
plot(t,s)
set(gca(), 'xlim', [0,10])
or::
plot(t,s)
a = gca()
a.set_xlim([0,10])
"""
ax = gcf().gca(**kwargs)
return ax
# More ways of creating axes:
def subplot(*args, **kwargs):
"""
Create a subplot command, creating axes with::
subplot(numRows, numCols, plotNum)
where *plotNum* = 1 is the first plot number and increasing *plotNums*
fill rows first. max(*plotNum*) == *numRows* * *numCols*
You can leave out the commas if *numRows* <= *numCols* <=
*plotNum* < 10, as in::
subplot(211) # 2 rows, 1 column, first (upper) plot
``subplot(111)`` is the default axis.
New subplots that overlap old will delete the old axes. If you do
not want this behavior, use
:meth:`matplotlib.figure.Figure.add_subplot` or the
:func:`~matplotlib.pyplot.axes` command. Eg.::
from pylab import *
plot([1,2,3]) # implicitly creates subplot(111)
subplot(211) # overlaps, subplot(111) is killed
plot(rand(12), rand(12))
subplot(212, axisbg='y') # creates 2nd subplot with yellow background
Keyword arguments:
*axisbg*:
The background color of the subplot, which can be any valid
color specifier. See :mod:`matplotlib.colors` for more
information.
*polar*:
A boolean flag indicating whether the subplot plot should be
a polar projection. Defaults to False.
*projection*:
A string giving the name of a custom projection to be used
for the subplot. This projection must have been previously
registered. See :func:`matplotlib.projections.register_projection`
.. seealso::
:func:`~matplotlib.pyplot.axes`:
For additional information on :func:`axes` and
:func:`subplot` keyword arguments.
:file:`examples/pylab_examples/polar_scatter.py`
**Example:**
.. plot:: mpl_examples/pylab_examples/subplot_demo.py
"""
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
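# A minimal usage sketch of subplot() as defined above; the helper name is
# hypothetical, and plot() is the line-plot command defined elsewhere in this
# module.
def _example_subplot_usage():
    subplot(211)               # 2 rows, 1 column, first (upper) axes
    plot([1, 2, 3])
    subplot(212, axisbg='y')   # second (lower) axes with a yellow background
    plot([3, 2, 1])
    return gcf()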
def twinx(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the xaxis. The ticks for *ax2* will be placed on
the right, and the *ax2* instance is returned.
.. seealso::
:file:`examples/api_examples/two_scales.py`
"""
if ax is None:
ax=gca()
ax1 = ax.twinx()
draw_if_interactive()
return ax1
def twiny(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the yaxis. The ticks for *ax2* will be placed on
the top, and the *ax2* instance is returned.
"""
if ax is None:
ax=gca()
ax1 = ax.twiny()
draw_if_interactive()
return ax1
def subplots_adjust(*args, **kwargs):
"""
call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Tune the subplot layout via the
:class:`matplotlib.figure.SubplotParams` mechanism. The parameter
meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(*args, **kwargs)
draw_if_interactive()
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for *targetfig* (default gcf).
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure==targetfig: break
else: raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def box(on=None):
"""
Turn the axes box on or off according to *on*.
If *on* is *None*, toggle state.
"""
ax = gca()
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
draw_if_interactive()
def title(s, *args, **kwargs):
"""
Set the title of the current axis to *s*.
Default font override is::
override = {'fontsize': 'medium',
'verticalalignment': 'bottom',
'horizontalalignment': 'center'}
.. seealso::
:func:`~matplotlib.pyplot.text`:
for information on how override and the optional args work.
"""
l = gca().set_title(s, *args, **kwargs)
draw_if_interactive()
return l
## Axis ##
def axis(*v, **kwargs):
"""
Set/Get the axis properties:
>>> axis()
returns the current axes limits ``[xmin, xmax, ymin, ymax]``.
>>> axis(v)
sets the min and max of the x and y axes, with
``v = [xmin, xmax, ymin, ymax]``.
>>> axis('off')
turns off the axis lines and labels.
>>> axis('equal')
changes limits of *x* or *y* axis so that equal increments of *x*
and *y* have the same length; a circle is circular.
>>> axis('scaled')
achieves the same result by changing the dimensions of the plot box instead
of the axis data limits.
>>> axis('tight')
changes *x* and *y* axis limits such that all data is shown. If
all data is already shown, it will move it to the center of the
figure without modifying (*xmax* - *xmin*) or (*ymax* -
*ymin*). Note this is slightly different than in matlab.
>>> axis('image')
is 'scaled' with the axis limits equal to the data limits.
>>> axis('auto')
and
>>> axis('normal')
are deprecated. They restore default behavior; axis limits are automatically
scaled to make the data fit comfortably within the plot box.
if ``len(*v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
as kwargs selectively to alter just those limits without changing
the others.
The xmin, xmax, ymin, ymax tuple is returned
.. seealso::
:func:`xlim`, :func:`ylim`
"""
ax = gca()
v = ax.axis(*v, **kwargs)
draw_if_interactive()
return v
def xlabel(s, *args, **kwargs):
"""
Set the *x* axis label of the current axis to *s*
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'top',
'horizontalalignment' : 'center'
}
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args work
"""
l = gca().set_xlabel(s, *args, **kwargs)
draw_if_interactive()
return l
def ylabel(s, *args, **kwargs):
"""
Set the *y* axis label of the current axis to *s*.
Defaults override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'center',
'horizontalalignment' : 'right',
           'rotation'            : 'vertical' }
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args
work.
"""
l = gca().set_ylabel(s, *args, **kwargs)
draw_if_interactive()
return l
def xlim(*args, **kwargs):
"""
Set/Get the xlimits of the current axes::
xmin, xmax = xlim() # return the current xlim
xlim( (xmin, xmax) ) # set the xlim to xmin, xmax
xlim( xmin, xmax ) # set the xlim to xmin, xmax
If you do not specify args, you can pass the xmin and xmax as
kwargs, eg.::
xlim(xmax=3) # adjust the max leaving min unchanged
xlim(xmin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_xlim(*args, **kwargs)
draw_if_interactive()
return ret
def ylim(*args, **kwargs):
"""
Set/Get the ylimits of the current axes::
ymin, ymax = ylim() # return the current ylim
ylim( (ymin, ymax) ) # set the ylim to ymin, ymax
ylim( ymin, ymax ) # set the ylim to ymin, ymax
If you do not specify args, you can pass the *ymin* and *ymax* as
kwargs, eg.::
ylim(ymax=3) # adjust the max leaving min unchanged
ylim(ymin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_ylim(*args, **kwargs)
draw_if_interactive()
return ret
def xscale(*args, **kwargs):
"""
call signature::
xscale(scale, **kwargs)
Set the scaling for the x-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_xscale(*args, **kwargs)
draw_if_interactive()
return ret
xscale.__doc__ = dedent(xscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def yscale(*args, **kwargs):
"""
call signature::
      yscale(scale, **kwargs)
Set the scaling for the y-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_yscale(*args, **kwargs)
draw_if_interactive()
return ret
yscale.__doc__ = dedent(yscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def xticks(*args, **kwargs):
"""
Set/Get the xlimits of the current ticklocs and labels::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = xticks()
# set the locations of the xticks
xticks( arange(6) )
# set the locations and labels of the xticks
xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif len(args)==1:
locs = ax.set_xticks(args[0])
labels = ax.get_xticklabels()
elif len(args)==2:
locs = ax.set_xticks(args[0])
labels = ax.set_xticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to xticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return locs, silent_list('Text xticklabel', labels)
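# A minimal usage sketch of xticks() as defined above; the helper name is
# hypothetical and assumes numpy is importable.
def _example_xticks_usage():
    import numpy as np
    plot(np.arange(5), np.arange(5) ** 2)
    locs, labels = xticks()                          # current locations/labels
    xticks(np.arange(5), ('a', 'b', 'c', 'd', 'e'))  # custom tick labels
    return locs, labels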
def yticks(*args, **kwargs):
"""
Set/Get the ylimits of the current ticklocs and labels::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = yticks()
# set the locations of the yticks
yticks( arange(6) )
# set the locations and labels of the yticks
yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif len(args)==1:
locs = ax.set_yticks(args[0])
labels = ax.get_yticklabels()
elif len(args)==2:
locs = ax.set_yticks(args[0])
labels = ax.set_yticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to yticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return ( locs,
silent_list('Text yticklabel', labels)
)
def rgrids(*args, **kwargs):
"""
Set/Get the radial locations of the gridlines and ticklabels on a
polar plot.
call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
    When called with no arguments, :func:`rgrids` simply returns the
tuple (*lines*, *labels*), where *lines* is an array of radial
gridlines (:class:`~matplotlib.lines.Line2D` instances) and
*labels* is an array of tick labels
(:class:`~matplotlib.text.Text` instances). When called with
arguments, the labels will appear at the specified radial
distances and angles.
*labels*, if not *None*, is a len(*radii*) list of strings of the
labels to use at each angle.
If *labels* is None, the rformatter will be used
Examples::
# set the locations of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines and labels
      lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args)==0:
lines = ax.yaxis.get_ticklines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
draw_if_interactive()
return ( silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels) )
def thetagrids(*args, **kwargs):
"""
Set/Get the theta locations of the gridlines and ticklabels.
If no arguments are passed, return a tuple (*lines*, *labels*)
where *lines* is an array of radial gridlines
(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
array of tick labels (:class:`~matplotlib.text.Text` instances)::
lines, labels = thetagrids()
Otherwise the syntax is::
lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
set the angles at which to place the theta grids (these gridlines
are equal along the theta dimension).
*angles* is in degrees.
*labels*, if not *None*, is a len(angles) list of strings of the
labels to use at each angle.
If *labels* is *None*, the labels will be ``fmt%angle``.
*frac* is the fraction of the polar axes radius at which to place
the label (1 is the edge). Eg. 1.05 is outside the axes and 0.95
is inside the axes.
Return value is a list of tuples (*lines*, *labels*):
- *lines* are :class:`~matplotlib.lines.Line2D` instances
- *labels* are :class:`~matplotlib.text.Text` instances.
Note that on input, the *labels* argument is a list of strings,
and on output it is a list of :class:`~matplotlib.text.Text`
instances.
Examples::
# set the locations of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
        raise RuntimeError('thetagrids only defined for polar axes')
if len(args)==0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
draw_if_interactive()
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels)
)
## Plotting Info ##
def plotting():
"""
Plotting commands
=============== =========================================================
Command Description
=============== =========================================================
axes Create a new axes
axis Set or return the current axis limits
bar make a bar chart
boxplot make a box and whiskers chart
cla clear current axes
clabel label a contour plot
clf clear a figure window
close close a figure window
colorbar add a colorbar to the current figure
cohere make a plot of coherence
contour make a contour plot
contourf make a filled contour plot
csd make a plot of cross spectral density
draw force a redraw of the current figure
errorbar make an errorbar graph
figlegend add a legend to the figure
figimage add an image to the figure, w/o resampling
figtext add text in figure coords
figure create or change active figure
fill make filled polygons
fill_between make filled polygons
gca return the current axes
gcf return the current figure
gci get the current image, or None
getp get a handle graphics property
hist make a histogram
hold set the hold state on current axes
legend add a legend to the axes
loglog a log log plot
imread load image file into array
imshow plot image data
matshow display a matrix in a new figure preserving aspect
pcolor make a pseudocolor plot
plot make a line plot
plotfile plot data from a flat file
psd make a plot of power spectral density
quiver make a direction field (arrows) plot
rc control the default params
savefig save the current figure
scatter make a scatter plot
setp set a handle graphics property
semilogx log x axis
semilogy log y axis
show show the figures
specgram a spectrogram plot
stem make a stem plot
subplot make a subplot (numrows, numcols, axesnum)
table add a table to the axes
text add some text at location x,y to the current axes
title add a title to the current axes
xlabel add an xlabel to the current axes
ylabel add a ylabel to the current axes
=============== =========================================================
The following commands will set the default colormap accordingly:
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
"""
pass
def get_plot_commands(): return ( 'axes', 'axis', 'bar', 'boxplot', 'cla', 'clf',
'close', 'colorbar', 'cohere', 'csd', 'draw', 'errorbar',
'figlegend', 'figtext', 'figimage', 'figure', 'fill', 'gca',
'gcf', 'gci', 'get', 'gray', 'barh', 'jet', 'hist', 'hold', 'imread',
'imshow', 'legend', 'loglog', 'quiver', 'rc', 'pcolor', 'pcolormesh', 'plot', 'psd',
'savefig', 'scatter', 'set', 'semilogx', 'semilogy', 'show',
'specgram', 'stem', 'subplot', 'table', 'text', 'title', 'xlabel',
'ylabel', 'pie', 'polar')
def colors():
"""
This is a do nothing function to provide you with help on how
matplotlib handles colors.
Commands which take color arguments can use several formats to
specify the colors. For the basic builtin colors, you can use a
single letter
===== =======
Alias Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
For a greater range of colors, you have two options. You can
specify the color using an html hex string, as in::
color = '#eeefff'
or you can pass an R,G,B tuple, where each of R,G,B are in the
range [0,1].
You can also use any legal html name for a color, for example::
color = 'red',
color = 'burlywood'
color = 'chartreuse'
The example below creates a subplot with a dark
    slate gray background::
        subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
    Here is an example that creates a pale turquoise title::
title('Is this the best color?', color='#afeeee')
"""
pass
def colormaps():
"""
matplotlib provides the following colormaps.
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
You can set the colormap for an image, pcolor, scatter, etc,
either as a keyword argument::
imshow(X, cmap=cm.hot)
or post-hoc using the corresponding pylab interface function::
imshow(X)
hot()
jet()
In interactive mode, this will update the colormap allowing you to
see which one works best for your data.
"""
pass
## Plotting part 1: manually generated functions and wrappers ##
from matplotlib.colorbar import colorbar_doc
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw)
draw_if_interactive()
return ret
colorbar.__doc__ = colorbar_doc
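# Illustrative sketch of the no-argument call path: colorbar() falls back to the
# current image (gci()) and current axes (gca()). Hypothetical data; the helper
# below is only defined, never called at import time.
def _colorbar_usage_sketch():
    """Attach a colorbar to the most recently drawn image."""
    import numpy as _np
    imshow(_np.random.rand(4, 4))   # becomes the current image
    colorbar()                      # picks up that image on the current axes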
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image
To apply clim to all axes images do::
clim(0, 0.5)
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images,
use, for example::
for im in gca().get_images():
im.set_clim(0, 0.05)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, eg with imshow')
im.set_clim(vmin, vmax)
draw_if_interactive()
def imread(*args, **kwargs):
return _imread(*args, **kwargs)
if _imread.__doc__ is not None:
imread.__doc__ = dedent(_imread.__doc__)
def matshow(A, fignum=None, **kw):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
With the exception of fignum, keyword arguments are passed to
:func:`~matplotlib.pyplot.imshow`.
*fignum*: [ None | integer | False ]
By default, :func:`matshow` creates a new figure window with
automatic numbering. If *fignum* is given as an integer, the
created figure will use this figure number. Because of how
:func:`matshow` tries to set the figure aspect ratio to be the
one of the array, if you provide the number of an already
existing figure, strange things may happen.
If *fignum* is *False* or 0, a new figure window will **NOT** be created.
"""
    if fignum is False or fignum == 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized figure
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kw)
gci._current = im
draw_if_interactive()
return im
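# Illustrative sketch of the fignum behaviour documented above: the default
# opens a new auto-numbered figure sized to the array, while fignum=0 (or False)
# reuses the current axes. Hypothetical data; never called at import time.
def _matshow_usage_sketch():
    """Display a small matrix both in a new figure and in the current axes."""
    import numpy as _np
    A = _np.arange(12).reshape(3, 4)
    matshow(A)            # new figure, aspect ratio taken from A
    matshow(A, fignum=0)  # draw into the current axes instead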
def polar(*args, **kwargs):
"""
call signature::
polar(theta, r, **kwargs)
Make a polar plot. Multiple *theta*, *r* arguments are supported,
with format strings, as in :func:`~matplotlib.pyplot.plot`.
"""
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
return ret
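# Illustrative sketch: polar() accepts theta/r arrays plus the usual plot()
# format strings. The radial data below is hypothetical; never called at import time.
def _polar_usage_sketch():
    """Plot a simple three-petal curve on polar axes."""
    import numpy as _np
    theta = _np.linspace(0, 2 * _np.pi, 200)
    r = 1 + 0.5 * _np.sin(3 * theta)
    polar(theta, r, 'r-')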
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
**kwargs):
"""
Plot the data in *fname*
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
    vector as you use in the *plotfuncs* dictionary, e.g., integer
column numbers in both or column names in both.
*comments*, *skiprows*, *checkrows*, and *delimiter* are all passed on to
:func:`matplotlib.pylab.csv2rec` to load the data into a record array.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})
"""
fig = figure()
if len(cols)<1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = dict()
r = mlab.csv2rec(fname, comments=comments,
skiprows=skiprows, checkrows=checkrows, delimiter=delimiter)
def getname_val(identifier):
'return the name and column data for identifier'
if is_string_like(identifier):
return identifier, r[identifier]
elif is_numlike(identifier):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
if len(cols)==1:
ax1 = fig.add_subplot(1,1,1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_xlabel(xname)
else:
N = len(cols)
for i in range(1,N):
if i==1:
ax = ax1 = fig.add_subplot(N-1,1,i)
ax.grid(True)
else:
ax = fig.add_subplot(N-1,1,i, sharex=ax1)
ax.grid(True)
yname, y = getname_val(cols[i])
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if xname=='date':
fig.autofmt_xdate()
draw_if_interactive()
## Plotting part 2: autogenerated wrappers for axes methods ##
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def acorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().acorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.acorr.__doc__ is not None:
acorr.__doc__ = dedent(Axes.acorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
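# Illustrative sketch of the hold-override pattern shared by the autogenerated
# wrappers in this section: hold=True|False applies to that single call, and the
# previous hold state of the axes is restored afterwards, even if the call raises.
# Hypothetical data; the helper is only defined, never called at import time.
def _hold_override_sketch():
    """Overlay a second line without permanently changing the hold state."""
    plot([1, 2, 3])
    plot([3, 2, 1], hold=True)   # forces hold on for this call only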
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def arrow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().arrow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.arrow.__doc__ is not None:
arrow.__doc__ = dedent(Axes.arrow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhline.__doc__ is not None:
axhline.__doc__ = dedent(Axes.axhline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhspan.__doc__ is not None:
axhspan.__doc__ = dedent(Axes.axhspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvline.__doc__ is not None:
axvline.__doc__ = dedent(Axes.axvline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvspan.__doc__ is not None:
axvspan.__doc__ = dedent(Axes.axvspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().bar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.bar.__doc__ is not None:
bar.__doc__ = dedent(Axes.bar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barh.__doc__ is not None:
barh.__doc__ = dedent(Axes.barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def broken_barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().broken_barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.broken_barh.__doc__ is not None:
broken_barh.__doc__ = dedent(Axes.broken_barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def boxplot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().boxplot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.boxplot.__doc__ is not None:
boxplot.__doc__ = dedent(Axes.boxplot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cohere(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().cohere(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.cohere.__doc__ is not None:
cohere.__doc__ = dedent(Axes.cohere.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def clabel(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().clabel(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.clabel.__doc__ is not None:
clabel.__doc__ = dedent(Axes.clabel.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contour(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contour(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contour.__doc__ is not None:
contour.__doc__ = dedent(Axes.contour.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contourf(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contourf(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contourf.__doc__ is not None:
contourf.__doc__ = dedent(Axes.contourf.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def csd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().csd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.csd.__doc__ is not None:
csd.__doc__ = dedent(Axes.csd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def errorbar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().errorbar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.errorbar.__doc__ is not None:
errorbar.__doc__ = dedent(Axes.errorbar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill.__doc__ is not None:
fill.__doc__ = dedent(Axes.fill.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill_between(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill_between(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill_between.__doc__ is not None:
fill_between.__doc__ = dedent(Axes.fill_between.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hexbin(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hexbin(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.hexbin.__doc__ is not None:
hexbin.__doc__ = dedent(Axes.hexbin.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hist(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hist(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hist.__doc__ is not None:
hist.__doc__ = dedent(Axes.hist.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hlines.__doc__ is not None:
hlines.__doc__ = dedent(Axes.hlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def imshow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().imshow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.imshow.__doc__ is not None:
imshow.__doc__ = dedent(Axes.imshow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def loglog(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().loglog(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.loglog.__doc__ is not None:
loglog.__doc__ = dedent(Axes.loglog.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolor(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolor(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolor.__doc__ is not None:
pcolor.__doc__ = dedent(Axes.pcolor.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolormesh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolormesh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolormesh.__doc__ is not None:
pcolormesh.__doc__ = dedent(Axes.pcolormesh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pie(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pie(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.pie.__doc__ is not None:
pie.__doc__ = dedent(Axes.pie.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot.__doc__ is not None:
plot.__doc__ = dedent(Axes.plot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot_date(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot_date(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot_date.__doc__ is not None:
plot_date.__doc__ = dedent(Axes.plot_date.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def psd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().psd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.psd.__doc__ is not None:
psd.__doc__ = dedent(Axes.psd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiver(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiver(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.quiver.__doc__ is not None:
quiver.__doc__ = dedent(Axes.quiver.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiverkey(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiverkey(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.quiverkey.__doc__ is not None:
quiverkey.__doc__ = dedent(Axes.quiverkey.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def scatter(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().scatter(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.scatter.__doc__ is not None:
scatter.__doc__ = dedent(Axes.scatter.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogx(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogx(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogx.__doc__ is not None:
semilogx.__doc__ = dedent(Axes.semilogx.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogy.__doc__ is not None:
semilogy.__doc__ = dedent(Axes.semilogy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def specgram(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().specgram(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret[-1]
hold(b)
return ret
if Axes.specgram.__doc__ is not None:
specgram.__doc__ = dedent(Axes.specgram.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().spy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.spy.__doc__ is not None:
spy.__doc__ = dedent(Axes.spy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def stem(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().stem(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.stem.__doc__ is not None:
stem.__doc__ = dedent(Axes.stem.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def step(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().step(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.step.__doc__ is not None:
step.__doc__ = dedent(Axes.step.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def vlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().vlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.vlines.__doc__ is not None:
vlines.__doc__ = dedent(Axes.vlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def xcorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().xcorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.xcorr.__doc__ is not None:
xcorr.__doc__ = dedent(Axes.xcorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barbs(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barbs(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barbs.__doc__ is not None:
barbs.__doc__ = dedent(Axes.barbs.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cla(*args, **kwargs):
ret = gca().cla(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.cla.__doc__ is not None:
cla.__doc__ = dedent(Axes.cla.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def grid(*args, **kwargs):
ret = gca().grid(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.grid.__doc__ is not None:
grid.__doc__ = dedent(Axes.grid.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.legend.__doc__ is not None:
legend.__doc__ = dedent(Axes.legend.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def table(*args, **kwargs):
ret = gca().table(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.table.__doc__ is not None:
table.__doc__ = dedent(Axes.table.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def text(*args, **kwargs):
ret = gca().text(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.text.__doc__ is not None:
text.__doc__ = dedent(Axes.text.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.annotate.__doc__ is not None:
annotate.__doc__ = dedent(Axes.annotate.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
| agpl-3.0 |
karthikvadla16/spark-tk | regression-tests/sparktkregtests/testcases/dicom/dicom_filter_test.py | 13 | 10428 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.filter functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import os
import dicom
import numpy
import random
from lxml import etree
import datetime
class DicomFilterTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(DicomFilterTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = "../../../datasets/dicom/dicom_uncompressed/xml/"
self.image_directory = "../../../datasets/dicom/dicom_uncompressed/imagedata/"
self.query = ".//DicomAttribute[@keyword='KEYWORD']/Value/text()"
self.count = self.dicom.metadata.count()
def test_filter_one_key(self):
"""test filter with basic filter function"""
# extract a key-value pair from the first row metadata for our use
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
# ask dicom to filter using our key-value filter function
self.dicom.filter(self._filter_key_values({ "PatientID" : patient_id }))
# we generate our own result to compare to dicom's
expected_result = self._filter({ "PatientID" : patient_id })
# ensure results match
self._compare_dicom_with_expected_result(expected_result)
def test_filter_multi_key(self):
"""test filter with basic filter function mult keyval pairs"""
# first we extract key-value pairs from the first row's metadata
# for our own use to generate a key-val dictionary
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
sopi_id = xml.xpath(self.query.replace("KEYWORD", "SOPInstanceUID"))[0]
key_val = { "PatientID" : patient_id, "SOPInstanceUID" : sopi_id }
# we use our filter function and ask dicom to filter
self.dicom.filter(self._filter_key_values(key_val))
# here we generate our own result
expected_result = self._filter(key_val)
# compare expected result to what dicom gave us
self._compare_dicom_with_expected_result(expected_result)
def test_filter_zero_matching_records(self):
"""test filter with filter function returns none"""
# we give dicom a filter function which filters by
# key-value and give it a key-value pair which will
# return 0 records
pandas = self.dicom.metadata.to_pandas()
self.dicom.filter(self._filter_key_values({ "PatientID" : -6 }))
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_nothing(self):
"""test filter with filter function filters nothing"""
# this filter function will return all records
self.dicom.filter(self._filter_nothing())
self.assertEqual(self.dicom.metadata.count(), self.count)
def test_filter_everything(self):
"""test filter function filter everything"""
# filter_everything filter out all of the records
self.dicom.filter(self._filter_everything())
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_timestamp_range(self):
"""test filter with timestamp range function"""
# we will test filter with a function which takes a begin and end
# date and returns all records with a study date between them
# we will set begin date to 15 years ago and end date to 5 years ago
begin_date = datetime.datetime.now() - datetime.timedelta(days=15*365)
end_date = datetime.datetime.now() - datetime.timedelta(days=5*365)
# here we will generate our own result by filtering for records
# which meet our criteria
expected_result = []
pandas = self.dicom.metadata.to_pandas()
# iterate through the rows and append all records with
# a study date between our begin and end date
for index, row in pandas.iterrows():
ascii_row = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(ascii_row)
study_date = xml_root.xpath(self.query.replace("KEYWORD", "StudyDate"))[0]
datetime_study_date = datetime.datetime.strptime(study_date, "%Y%m%d")
if datetime_study_date > begin_date and datetime_study_date < end_date:
expected_result.append(ascii_row)
# now we ask dicom to use our filter function below to return
# all records with a StudyDate within our specified range
self.dicom.filter(self._filter_timestamp_range(begin_date, end_date))
# ensure that expected result matches actual
self._compare_dicom_with_expected_result(expected_result)
def test_return_type_str(self):
"""test filter with function that returns strings"""
self.dicom.filter(self._filter_return_string())
self.assertEqual(3, self.dicom.metadata.count())
def test_return_type_int(self):
"""test filter wtih function that returns ints"""
self.dicom.filter(self._filter_return_int())
self.assertEqual(3, self.dicom.metadata.count())
def test_filter_has_bugs(self):
"""test filter with a broken filter function"""
with self.assertRaisesRegexp(Exception, "this filter is broken!"):
self.dicom.filter(self._filter_has_bugs())
self.dicom.metadata.count()
def test_filter_invalid_param(self):
"""test filter with an invalid param type"""
# should fail because filter takes a function not a keyvalue pair
with self.assertRaisesRegexp(Exception, "'dict' object is not callable"):
self.dicom.filter({ "PatientID" : "bla" })
self.dicom.metadata.count()
def test_filter_invalid_function(self):
"""test filter with function which takes more than one param"""
with self.assertRaisesRegexp(Exception, "takes exactly 2 arguments"):
self.dicom.filter(self._filter_invalid())
self.dicom.metadata.count()
def _filter_key_values(self, key_val):
"""filter by key-value"""
def _filter_key_value(row):
metadata = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(metadata)
for key in key_val:
xml_element_value = xml_root.xpath(".//DicomAttribute[@keyword='" + key + "']/Value/text()")[0]
                if xml_element_value != key_val[key]:
                    return False
            # only keep the row once every key-value pair has matched
            return True
return _filter_key_value
def _filter_nothing(self):
"""returns all records"""
def _filter_nothing(row):
return True
return _filter_nothing
def _filter_everything(self):
"""returns no records"""
def _filter_everything(row):
return False
return _filter_everything
def _filter_timestamp_range(self, begin_date, end_date):
"""return records within studydate date range"""
def _filter_timestamp_range(row):
metadata = row["metadata"].encode("ascii", "ignore")
xml_root = etree.fromstring(metadata)
timestamp = xml_root.xpath(".//DicomAttribute[@keyword='StudyDate']/Value/text()")[0]
timestamp = datetime.datetime.strptime(timestamp, "%Y%m%d")
if begin_date < timestamp and timestamp < end_date:
return True
else:
return False
return _filter_timestamp_range
def _filter_return_string(self):
"""filter function which returns str"""
def _filter_return_string(row):
return "True"
return _filter_return_string
def _filter_return_int(self):
"""filter function returns int"""
def _filter_return_int(row):
return -1
return _filter_return_int
def _filter_has_bugs(self):
"""broken filter function"""
def _filter_has_bugs(row):
raise Exception("this filter is broken!")
return _filter_has_bugs
def _filter_invalid(self):
"""filter function takes 2 params"""
# filter is invalid because it takes
# 2 parameters
def _filter_invalid(index, row):
return True
return _filter_invalid
def _filter(self, keywords):
"""filter records by key value pair"""
# here we are generating the expected result
matching_records = []
pandas_metadata = self.dicom.metadata.to_pandas()["metadata"]
        for row in pandas_metadata:
            ascii_xml = row.encode("ascii", "ignore")
            xml = etree.fromstring(ascii_xml)
            # keep the record only if every keyword's value matches
            if all(xml.xpath(self.query.replace("KEYWORD", keyword))[0] == keywords[keyword]
                   for keyword in keywords):
                matching_records.append(ascii_xml)
return matching_records
def _compare_dicom_with_expected_result(self, expected_result):
"""compare expected result with actual result"""
pandas_result = self.dicom.metadata.to_pandas()["metadata"]
for expected, actual in zip(expected_result, pandas_result):
actual_ascii = actual.encode("ascii", "ignore")
self.assertEqual(actual_ascii, expected)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
larsmans/numpy | numpy/linalg/linalg.py | 2 | 73993 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
        in inv
        return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
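# Example of the promotion performed by _commonType above (sketch):
#   _commonType(empty(3, dtype=single)) -> (double, single)
# i.e. a float32 input is computed in double precision but reported as single;
# any complex input switches the computation type to cdouble.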
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
    The solutions are computed using LAPACK routine _gesv.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t))
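# Illustrative sketch of the two-step solve described in the Notes of cholesky().
# NumPy has no dedicated triangular solver here, so the generic solve() stands in
# for the forward/back substitutions; the matrix is a small hypothetical SPD example.
def _cholesky_solve_sketch():
    """Solve A x = b via its Cholesky factor L (L y = b, then L.H x = y)."""
    A = array([[4.0, 2.0], [2.0, 3.0]])
    b = array([1.0, 2.0])
    L = cholesky(A)
    y = solve(L, b)
    x = solve(L.conj().T, y)
    return x  # agrees with solve(A, b) up to rounding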
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that the best fit is y0 = 1, m = 0.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[m], [y0]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
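# Editor's note -- illustrative sketch, not part of this module's API. The
# docstring above derives ``x = inv(r) * q.T * b`` for least squares; the
# hypothetical helper below uses the reduced factorization and a triangular
# solve instead of forming inv(r). In practice ``lstsq`` is the usual entry
# point.
def _demo_qr_least_squares():
    import numpy as np
    A = np.array([[0., 1.], [1., 1.], [1., 1.], [2., 1.]])
    b = np.array([1., 0., 2., 1.])
    q, r = np.linalg.qr(A)                    # reduced: q is (4, 2), r is (2, 2)
    x = np.linalg.solve(r, np.dot(q.T, b))    # solve r x = q.T b
    # the least-squares solution satisfies the normal equations A.T A x = A.T b
    assert np.allclose(np.dot(A.T, np.dot(A, x)), np.dot(A.T, b))
    return x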
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t)
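# Editor's note -- illustrative sketch, not part of this module's API. As the
# docstring says, ``eigvals`` differs from ``eig`` only in not returning the
# eigenvectors; the hypothetical helper below checks that the spectra agree
# (sorted, because neither function guarantees an ordering).
def _demo_eigvals_matches_eig():
    import numpy as np
    a = np.array([[2., 1.], [1., 3.]])
    w_only = np.linalg.eigvals(a)
    w, _ = np.linalg.eig(a)
    assert np.allclose(np.sort(w_only), np.sort(w))
    return w_only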
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _ssyevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t))
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will always be of complex type. When `a` is real
the resulting eigenvalues will be real (0 imaginary part) or
occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t)
return w.astype(result_t), wrap(vt)
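# Editor's note -- illustrative sketch, not part of this module's API. It
# verifies the defining relation ``dot(a, v[:, i]) == w[i] * v[:, i]`` from the
# Notes above and shows that left eigenvectors can be obtained by decomposing
# the transpose. The helper name is hypothetical.
def _demo_eig_relations():
    import numpy as np
    a = np.array([[1., 2.], [3., 4.]])
    w, v = np.linalg.eig(a)
    for i in range(w.shape[0]):
        assert np.allclose(np.dot(a, v[:, i]), w[i] * v[:, i])
    # left eigenvectors (y with y.T a = z y.T) are right eigenvectors of a.T,
    # and the spectrum is unchanged
    wl, _ = np.linalg.eig(a.T)
    assert np.allclose(np.sort(w), np.sort(wl))
    return w, v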
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues, not necessarily ordered.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _ssyevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t))
vt = vt.astype(result_t)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t)
s = s.astype(_realType(result_t))
vt = vt.astype(result_t)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t))
return s
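# Editor's note -- illustrative sketch, not part of this module's API. The
# Notes above state that the squared singular values are the eigenvalues of
# ``a.H a``; the hypothetical helper below checks that and builds the rank-1
# truncation that keeps only the largest singular value.
def _demo_svd_relations():
    import numpy as np
    a = np.array([[3., 0.], [4., 5.]])
    u, s, vt = np.linalg.svd(a)
    evals = np.linalg.eigvalsh(np.dot(a.T, a))     # ascending eigenvalues of a.T a
    assert np.allclose(np.sort(s**2), evals)
    a1 = s[0] * np.outer(u[:, 0], vt[0, :])        # best rank-1 approximation
    return a1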
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0]/s[-1]
else:
return norm(x, p)*norm(inv(x), p)
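# Editor's note -- illustrative sketch, not part of this module's API. For the
# default norm the condition number is the ratio of extreme singular values,
# and for ``p=2`` it equals ``norm(x, 2) * norm(inv(x), 2)`` as described in
# the Notes. The helper name is hypothetical.
def _demo_cond_equivalences():
    import numpy as np
    x = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
    s = np.linalg.svd(x, compute_uv=False)
    assert np.allclose(np.linalg.cond(x), s[0] / s[-1])
    assert np.allclose(np.linalg.cond(x, 2),
                       np.linalg.norm(x, 2) * np.linalg.norm(np.linalg.inv(x), 2))
    return np.linalg.cond(x)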
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
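# Editor's note -- illustrative sketch, not part of this module's API. It shows
# how the tolerance discussed in the Notes changes the reported rank of a
# nearly rank-deficient matrix. The helper name is hypothetical.
def _demo_matrix_rank_tolerance():
    import numpy as np
    a = np.diag([1.0, 1.0, 1e-12])                    # third singular value is tiny
    rank_default = np.linalg.matrix_rank(a)           # default tol ~ 1e-15, so rank 3
    rank_loose = np.linalg.matrix_rank(a, tol=1e-6)   # looser tol discards it: rank 2
    return rank_default, rank_loose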
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
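# Editor's note -- illustrative sketch, not part of this module's API. The
# Notes above describe the pseudo-inverse as the matrix that solves the
# least-squares problem; the hypothetical helper below checks that
# ``pinv(A)`` applied to ``b`` matches the ``lstsq`` solution for a
# full-column-rank system.
def _demo_pinv_least_squares():
    import numpy as np
    A = np.array([[0., 1.], [1., 1.], [1., 1.], [2., 1.]])
    b = np.array([1., 0., 2., 1.])
    x_pinv = np.dot(np.linalg.pinv(A), b)
    x_lstsq = np.linalg.lstsq(A, b, rcond=-1)[0]
    assert np.allclose(x_pinv, x_lstsq)
    return x_pinv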
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 1.6.0.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
return sign.astype(result_t), logdet.astype(real_t)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return _umath_linalg.det(a, signature=signature).astype(result_t)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute the extreme singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
.. versionadded:: 1.10.0
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Check the default case first and handle it immediately.
if ord is None and axis is None:
ndim = x.ndim
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shape of the matrices
this can speed up the multiplication a lot.
The first and last argument can be 1-D and are treated respectively as
row and column vector. The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
First and last argument can be 1-D and are treated respectively as
row and column vector, the other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> import numpy as np
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
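# Editor's note -- illustrative sketch, not part of this module's API. It works
# through the parenthesization-cost argument from the docstring above using the
# ``cost`` helper described there, for A (10x100), B (100x5), C (5x50).
def _demo_multi_dot_costs():
    import numpy as np
    def cost(X, Y):
        return X.shape[0] * X.shape[1] * Y.shape[1]
    A = np.ones((10, 100))
    B = np.ones((100, 5))
    C = np.ones((5, 50))
    cost_ab_c = cost(A, B) + cost(np.dot(A, B), C)   # ((AB)C) = 5000 + 2500 = 7500
    cost_a_bc = cost(B, C) + cost(A, np.dot(B, C))   # (A(BC)) = 25000 + 50000 = 75000
    assert cost_ab_c < cost_a_bc
    return cost_ab_c, cost_a_bc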
def _multi_dot_three(A, B, C):
"""
Find best ordering for three arrays and do the multiplication.
Doing it manually instead of using dynamic programming is
approximately 15 times faster.
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
# cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return an np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
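# Editor's note -- illustrative sketch, not part of this module's API, and it
# assumes it lives next to the private helpers above. The order matrix from
# _multi_dot_matrix_chain_order tells _multi_dot where to split each product.
def _demo_chain_order():
    import numpy as np
    arrays = [np.ones((10, 100)), np.ones((100, 5)),
              np.ones((5, 50)), np.ones((50, 3))]
    order, costs = _multi_dot_matrix_chain_order(arrays, return_costs=True)
    result = _multi_dot(arrays, order, 0, len(arrays) - 1)
    assert result.shape == (10, 3)
    return order, costs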
| bsd-3-clause |
southpaw94/MachineLearning | DimensionalityReduction/pca.py | 1 | 2233 | # This script uses the 'principal component analysis' (PCA) technique to
# determine the k dimensions with the most variance of the
# original set of d features.
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from numpy.linalg import eig
import numpy as np
df_wine = pd.read_csv('Wine.csv', header=None)
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Use StandardScaler to standardize the input data rather than
# reinventing the wheel with a hand-written standardization function.
# Only the feature data needs to be standardized; the target values are
# already discrete class labels, so they are left untouched.
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
# Fit the scaler on the training data only, then apply the same
# transformation to the test data to avoid information leakage.
X_test_std = sc.transform(X_test)
# Create the covariance matrix from the transpose of the X_train_std data
cov_mat = np.cov(X_train_std.T)
# Find the eigenvalues and eigenvectors of the covariance matrix
eig_vals, eig_vecs = eig(cov_mat)
total = sum(eig_vals)
var_exp = [(i / total) for i in sorted(eig_vals, reverse=True)]
var_exp_cumul = np.cumsum(var_exp)
plt.bar(range(1,14), var_exp, alpha=0.5, align='center', \
label='Individual Explained Variance')
plt.step(range(1,14), var_exp_cumul, where='mid', \
label='Cumulative Explained Variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal Components')
plt.legend(loc='best')
plt.show()
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]
eig_pairs.sort(reverse=True)
w = np.hstack((eig_pairs[0][1][:, np.newaxis], eig_pairs[1][1][:, np.newaxis]))
print(X_train_std[0].dot(w))
# Project the training data onto the two leading principal components
X_train_pca = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_pca[y_train == l, 0], X_train_pca[y_train == l, 1], c=c, label=l, marker=m)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.show()
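# Editor's cross-check (illustrative addition, assumes scikit-learn is
# installed; not part of the original script). sklearn's PCA should report the
# same explained-variance ratios as the manual eigendecomposition above, and
# the projected data should agree up to the sign of each component, hence the
# absolute-value comparison.
from sklearn.decomposition import PCA

pca = PCA(n_components=2)
X_train_sklearn = pca.fit_transform(X_train_std)
print('Explained variance ratios agree:',
      np.allclose(pca.explained_variance_ratio_, var_exp[:2]))
print('Projections agree up to sign:',
      np.allclose(np.abs(X_train_sklearn), np.abs(X_train_pca)))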
| gpl-2.0 |
APMonitor/applications | scheduling_and_control/3products_beginning_application/apm.py | 4 | 26852 | # Import
import csv
import math
import os
import random
import string
import time
import webbrowser
from contextlib import closing
import sys
# Get Python version
ver = sys.version_info[0]
#print('Version: '+str(ver))
if ver==2: # Python 2
import urllib
else: # Python 3+
import urllib.request, urllib.parse, urllib.error
#import socket
if ver==2: # Python 2
def cmd(server, app, aline):
'''Send a request to the server \n \
server = address of server \n \
app = application name \n \
aline = line to send to server \n'''
try:
# Web-server URL address
url_base = string.strip(server) + '/online/apm_line.php'
app = app.lower()
app.replace(" ", "")
params = urllib.urlencode({'p': app, 'a': aline})
f = urllib.urlopen(url_base, params)
# Stream solution output
if(aline=='solve'):
line = ''
while True:
char = f.read(1)
if not char:
break
elif char == '\n':
print(line)
line = ''
else:
line += char
# Send request to web-server
response = f.read()
except:
response = 'Failed to connect to server'
return response
def load_model(server,app,filename):
'''Load APM model file \n \
server = address of server \n \
app = application name \n \
filename = APM file name'''
# Load APM File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
app.replace(" ","")
response = cmd(server,app,' '+aline)
return
def load_data(server,app,filename):
'''Load CSV data file \n \
server = address of server \n \
app = application name \n \
filename = CSV file name'''
# Load CSV File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
app.replace(" ","")
response = cmd(server,app,'csv '+aline)
return
def get_ip(server):
'''Get current IP address \n \
server = address of server'''
# get ip address for web-address lookup
url_base = string.strip(server) + '/ip.php'
f = urllib.urlopen(url_base)
ip = string.strip(f.read())
return ip
def apm_t0(server,app,mode):
'''Retrieve restart file \n \
server = address of server \n \
app = application name \n \
mode = {'ss','mpu','rto','sim','est','ctl'} '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + string.strip(mode) + '.t0'
f = urllib.urlopen(url)
# Send request to web-server
solution = f.read()
return solution
def get_solution(server,app):
'''Retrieve solution results\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/results.csv'
f = urllib.urlopen(url)
# Send request to web-server
solution = f.read()
# Write the file
sol_file = 'solution_' + app + '.csv'
fh = open(sol_file,'w')
# possible problem here if file isn't able to open (see MATLAB equivalent)
fh.write(solution.replace('\r',''))
fh.close()
# Use array package
from array import array
# Import CSV file from web server
with closing(urllib.urlopen(url)) as f:
reader = csv.reader(f, delimiter=',')
y={}
for row in reader:
if len(row)==2:
y[row[0]] = float(row[1])
else:
y[row[0]] = array('f', [float(col) for col in row[1:]])
# Return solution
return y
def get_file(server,app,filename):
'''Retrieve any file from web-server\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + filename
f = urllib.urlopen(url)
# Send request to web-server
file = f.read()
# Write the file
fh = open(filename,'w')
fh.write(file.replace('\r',''))
fh.close()
return (file)
def set_option(server,app,name,value):
'''Load APM option \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.option \n \
value = numeric value of option '''
aline = 'option %s = %f' %(name,value)
app = app.lower()
app.replace(" ","")
response = cmd(server,app,aline)
return response
def web(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_oper.htm'
webbrowser.get().open_new_tab(url)
return url
def web_var(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_var.htm'
webbrowser.get().open_new_tab(url)
return url
def web_root(server,app):
'''Open APM root folder \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/'
webbrowser.get().open_new_tab(url)
return url
def classify(server,app,type,aline):
'''Classify parameter or variable as FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
type = {FV,MV,SV,CV} \n \
aline = parameter or variable name '''
x = 'info' + ' ' + type + ', ' + aline
app = app.lower()
app.replace(" ","")
response = cmd(server,app,x)
return response
def csv_data(filename):
'''Load CSV File into Python
A = csv_data(filename)
Function csv_data extracts data from a comma
separated value (csv) file and returns it
to the array A'''
try:
f = open(filename, 'rb')
reader = csv.reader(f)
headers = reader.next()
c = [float] * (len(headers))
A = {}
for h in headers:
A[h] = []
for row in reader:
for h, v, conv in zip(headers, row, c):
A[h].append(conv(v))
except ValueError:
A = {}
return A
def csv_lookup(name,replay):
'''Lookup Index of CSV Column \n \
name = parameter or variable name \n \
replay = csv replay data to search'''
header = replay[0]
try:
i = header.index(string.strip(name))
except ValueError:
i = -1 # no match
return i
def csv_element(name,row,replay):
'''Retrieve CSV Element \n \
name = parameter or variable name \n \
row = row of csv file \n \
replay = csv replay data to search'''
# get row number
if (row>len(replay)): row = len(replay)-1
# get column number
col = csv_lookup(name,replay)
if (col>=0): value = float(replay[row][col])
else: value = float('nan')
return value
def get_attribute(server,app,name):
'''Retrieve options for FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.{MEAS,MODEL,NEWVAL} \n \n \
Valid name combinations \n \
{FV,MV,CV}.MEAS \n \
{SV,CV}.MODEL \n \
{FV,MV}.NEWVAL '''
# Web-server URL address
url_base = string.strip(server) + '/online/get_tag.php'
app = app.lower()
app.replace(" ","")
params = urllib.urlencode({'p':app,'n':name})
f = urllib.urlopen(url_base,params)
# Send request to web-server
value = eval(f.read())
return value
def load_meas(server,app,name,value):
'''Transfer measurement to server for FV, MV, or CV \n \
server = address of server \n \
app = application name \n \
name = name of {FV,MV,CV} '''
# Web-server URL address
url_base = string.strip(server) + '/online/meas.php'
app = app.lower()
app.replace(" ","")
params = urllib.urlencode({'p':app,'n':name+'.MEAS','v':value})
f = urllib.urlopen(url_base,params)
# Send request to web-server
response = f.read()
return response
else: # Python 3+
def cmd(server,app,aline):
'''Send a request to the server \n \
server = address of server \n \
app = application name \n \
aline = line to send to server \n'''
try:
# Web-server URL address
url_base = server.strip() + '/online/apm_line.php'
app = app.lower()
app.replace(" ","")
params = urllib.parse.urlencode({'p':app,'a':aline})
en_params = params.encode()
f = urllib.request.urlopen(url_base,en_params)
# Stream solution output
if(aline=='solve'):
line = ''
while True:
en_char = f.read(1)
char = en_char.decode()
if not char:
break
elif char == '\n':
print(line)
line = ''
else:
line += char
# Send request to web-server
en_response = f.read()
response = en_response.decode()
except:
response = 'Failed to connect to server'
return response
def load_model(server,app,filename):
'''Load APM model file \n \
server = address of server \n \
app = application name \n \
filename = APM file name'''
# Load APM File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
app.replace(" ","")
response = cmd(server,app,' '+aline)
return
def load_data(server,app,filename):
'''Load CSV data file \n \
server = address of server \n \
app = application name \n \
filename = CSV file name'''
# Load CSV File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
app.replace(" ","")
response = cmd(server,app,'csv '+aline)
return
def get_ip(server):
'''Get current IP address \n \
server = address of server'''
# get ip address for web-address lookup
url_base = server.strip() + '/ip.php'
f = urllib.request.urlopen(url_base)
fip = f.read()
ip = fip.decode().strip()
return ip
def apm_t0(server,app,mode):
'''Retrieve restart file \n \
server = address of server \n \
app = application name \n \
mode = {'ss','mpu','rto','sim','est','ctl'} '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + mode.strip() + '.t0'
f = urllib.request.urlopen(url)
# Send request to web-server
solution = f.read()
return solution
def get_solution(server,app):
'''Retrieve solution results\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/results.csv'
f = urllib.request.urlopen(url)
# Send request to web-server
solution = f.read()
# Write the file
sol_file = 'solution_' + app + '.csv'
fh = open(sol_file,'w')
# possible problem here if file isn't able to open (see MATLAB equivalent)
en_solution = solution.decode().replace('\r','')
fh.write(en_solution)
fh.close()
# Use array package
from array import array
# Import CSV file from web server
with closing(urllib.request.urlopen(url)) as f:
fr = f.read()
de_f = fr.decode()
reader = csv.reader(de_f.splitlines(), delimiter=',')
y={}
for row in reader:
if len(row)==2:
y[row[0]] = float(row[1])
else:
y[row[0]] = array('f', [float(col) for col in row[1:]])
# Return solution
return y
def get_file(server,app,filename):
'''Retrieve any file from web-server\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + filename
f = urllib.request.urlopen(url)
# Send request to web-server
file = f.read()
# Write the file
fh = open(filename,'w')
en_file = file.decode().replace('\r','')
fh.write(en_file)
fh.close()
return (file)
def set_option(server,app,name,value):
'''Load APM option \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.option \n \
value = numeric value of option '''
aline = 'option %s = %f' %(name,value)
app = app.lower()
app.replace(" ","")
response = cmd(server,app,aline)
return response
def web(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_oper.htm'
webbrowser.get().open_new_tab(url)
return url
def web_var(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_var.htm'
webbrowser.get().open_new_tab(url)
return url
def web_root(server,app):
'''Open APM root folder \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/'
webbrowser.get().open_new_tab(url)
return url
def classify(server,app,type,aline):
'''Classify parameter or variable as FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
type = {FV,MV,SV,CV} \n \
aline = parameter or variable name '''
x = 'info' + ' ' + type + ', ' + aline
app = app.lower()
app.replace(" ","")
response = cmd(server,app,x)
return response
def csv_data(filename):
'''Load CSV File into Python
A = csv_data(filename)
Function csv_data extracts data from a comma
separated value (csv) file and returns it
to the array A'''
try:
f = open(filename, 'rb')
reader = csv.reader(f)
headers = next(reader)
c = [float] * (len(headers))
A = {}
for h in headers:
A[h] = []
for row in reader:
for h, v, conv in zip(headers, row, c):
A[h].append(conv(v))
except ValueError:
A = {}
return A
def csv_lookup(name,replay):
'''Lookup Index of CSV Column \n \
name = parameter or variable name \n \
replay = csv replay data to search'''
header = replay[0]
try:
i = header.index(name.strip())
except ValueError:
i = -1 # no match
return i
def csv_element(name,row,replay):
'''Retrieve CSV Element \n \
name = parameter or variable name \n \
row = row of csv file \n \
replay = csv replay data to search'''
# get row number
if (row>len(replay)): row = len(replay)-1
# get column number
col = csv_lookup(name,replay)
if (col>=0): value = float(replay[row][col])
else: value = float('nan')
return value
def get_attribute(server,app,name):
'''Retrieve options for FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.{MEAS,MODEL,NEWVAL} \n \n \
Valid name combinations \n \
{FV,MV,CV}.MEAS \n \
{SV,CV}.MODEL \n \
{FV,MV}.NEWVAL '''
# Web-server URL address
url_base = server.strip() + '/online/get_tag.php'
app = app.lower()
app.replace(" ","")
params = urllib.parse.urlencode({'p':app,'n':name})
params_en = params.encode()
f = urllib.request.urlopen(url_base,params_en)
# Send request to web-server
value = eval(f.read())
return value
def load_meas(server,app,name,value):
'''Transfer measurement to server for FV, MV, or CV \n \
server = address of server \n \
app = application name \n \
name = name of {FV,MV,CV} '''
# Web-server URL address
url_base = server.strip() + '/online/meas.php'
app = app.lower()
app.replace(" ","")
params = urllib.parse.urlencode({'p':app,'n':name+'.MEAS','v':value})
params_en = params.encode()
f = urllib.request.urlopen(url_base,params_en)
# Send request to web-server
response = f.read()
return response
def solve(app,imode):
'''
APM Solver for simulation, estimation, and optimization with both
static (steady-state) and dynamic models. The dynamic modes can solve
index 2+ DAEs without numerical differentiation.
y = solve(app,imode)
Function solve uploads the model file (apm) and optionally
a data file (csv) with the same name to the web-server and performs
a forward-time stepping integration of ODE or DAE equations
with the following arguments:
Input: app = model (apm) and data file (csv) name
imode = simulation mode {1..7}
steady-state dynamic sequential
simulate 1 4 7
estimate 2 5 8 (under dev)
optimize 3 6 9 (under dev)
Output: y.names = names of all variables
y.values = tables of values corresponding to y.names
y.nvar = number of variables
y.x = combined variables and values but variable
names may be modified to make them valid
characters (e.g. replace '[' with '')
'''
# server and application file names
server = 'http://byu.apmonitor.com'
app = app.lower()
app.replace(" ","")
app_model = app + '.apm'
app_data = app + '.csv'
# randomize the application name
from random import randint
app = app + '_' + str(randint(1000,9999))
# clear previous application
cmd(server,app,'clear all')
try:
# load model file
load_model(server,app,app_model)
except:
        msg = 'Model file ' + app_model + ' does not exist'
print(msg)
return []
# check if data file exists (optional)
try:
# load data file
load_data(server,app,app_data)
except:
# data file is optional
        print('Optional data file ' + app_data + ' does not exist')
pass
# default options
    # use or do not use the web viewer (flag renamed so it does not shadow the web() function)
    use_web = False
    if use_web:
        set_option(server,app,'nlc.web',2)
    else:
        set_option(server,app,'nlc.web',0)
# internal nodes in the collocation (between 2 and 6)
set_option(server,app,'nlc.nodes',3)
# sensitivity analysis (default: 0 - off)
set_option(server,app,'nlc.sensitivity',0)
# simulation mode (1=ss, 2=mpu, 3=rto)
# (4=sim, 5=est, 6=nlc, 7=sqs)
set_option(server,app,'nlc.imode',imode)
# attempt solution
solver_output = cmd(server,app,'solve')
# check for successful solution
status = get_attribute(server,app,'nlc.appstatus')
if status==1:
# open web viewer if selected
        if use_web:
            web(server,app)
# retrieve solution and solution.csv
z = get_solution(server,app)
return z
else:
print(solver_output)
print('Error: Did not converge to a solution')
return []
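def _example_solve_usage():
    '''Illustrative sketch only (not part of the original apm.py): assumes a
    hypothetical model file demo.apm (and optionally demo.csv) in the working
    directory and network access to the public APM server used by solve().'''
    # imode 7 = sequential dynamic simulation (see the table in solve())
    y = solve('demo', 7)
    if y:
        # y maps variable names to arrays of values, e.g. y['time']
        return list(y)[:5]
    return []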
def plotter(y, subplots=1, save=False, filename='solution', format='png'):
'''
The plotter will go through each of the variables in the output y and
create plots for them. The number of vertical subplots can be
specified and the plots can be saved in the same folder.
    This functionality is dependent on matplotlib, so this library must
be installed on the computer for the automatic plotter to work.
The input y should be the output from the apm solution. This can be
retrieved from the server using the following line of code:
y = get_solution(server, app)
'''
try:
import matplotlib.pyplot as plt
var_size = len(y)
colors = ['r-', 'g-', 'k-', 'b-']
color_pick = 0
if subplots > 9:
subplots = 9
j = 1
pltcount = 0
start = True
for i in range(var_size):
if list(y)[i] != 'time' and list(y)[i][:3] != 'slk':
if j == 1:
if start != True:
plt.xlabel('time')
start = False
if save:
if pltcount != 0:
plt.savefig(filename + str(pltcount) + '.' + format, format=format)
pltcount += 1
plt.figure()
else:
plt.gca().axes.get_xaxis().set_ticklabels([])
plt.subplot(100*subplots+10+j)
plt.plot(y['time'], y[list(y)[i]], colors[color_pick], linewidth=2.0)
if color_pick == 3:
color_pick = 0
else:
color_pick += 1
plt.ylabel(list(y)[i])
if subplots == 1:
plt.title(list(y)[i])
if j == subplots or i+2 == var_size:
j = 1
else:
j += 1
plt.xlabel('time')
if save:
plt.savefig('plots/' + filename + str(pltcount) + '.' + format, format=format)
if pltcount <= 20:
plt.show()
except ImportError:
print('Dependent Packages not imported.')
print('Please install matplotlib package to use plotting features.')
except:
print('Graphs not created. Double check that the')
        print('simulation/optimization was successful')
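def _example_plotter_usage(y):
    '''Illustrative sketch only: y is assumed to be the dictionary returned by
    solve() or get_solution() above; plotter() then draws every solution
    variable against y['time'], three stacked subplots per figure.'''
    if y:
        plotter(y, subplots=3)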
# This code adds back compatibility with previous versions
apm = cmd
apm_load = load_model
csv_load = load_data
apm_ip = get_ip
apm_sol = get_solution
apm_get = get_file
apm_option = set_option
apm_web = web
apm_web_var = web_var
apm_web_root = web_root
apm_info = classify
apm_tag = get_attribute
apm_meas = load_meas
apm_solve = solve
| apache-2.0 |
nrhine1/scikit-learn | sklearn/learning_curve.py | 27 | 13650 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer or cross-validation generator, optional, default=3
A cross-validation generator to use. If int, determines the number
of folds in StratifiedKFold if estimator is a classifier and the
target y is binary or multiclass, or the number of folds in KFold
otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that has been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
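def _example_learning_curve_usage():
    """Illustrative usage sketch (not part of the public scikit-learn API):
    computes a learning curve for a linear SVC on the iris data and returns
    the mean cross-validated training and test score per training-set size."""
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    # shuffle so that even the smallest training subsets contain all classes
    rng = np.random.RandomState(0)
    order = rng.permutation(len(iris.target))
    X, y = iris.data[order], iris.target[order]
    sizes, train_scores, test_scores = learning_curve(
        SVC(kernel="linear"), X, y,
        train_sizes=np.linspace(0.1, 1.0, 5), cv=3)
    # train_scores and test_scores have shape (n_ticks, n_cv_folds)
    return sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)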
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
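def _example_validation_curve_usage():
    """Illustrative usage sketch (not part of the public scikit-learn API):
    scores an SVC on the iris data for several values of gamma and returns
    the mean cross-validated training and test score per parameter value."""
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    param_range = np.logspace(-6, -1, 5)
    train_scores, test_scores = validation_curve(
        SVC(), iris.data, iris.target, param_name="gamma",
        param_range=param_range, cv=3)
    return param_range, train_scores.mean(axis=1), test_scores.mean(axis=1)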
| bsd-3-clause |
vermouthmjl/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 65 | 5529 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative definition (from ESLII)
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float64)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float64)
y.fill(0.0)
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float64)
y.fill(1.0)
sw = np.ones(102, dtype=np.float64)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
rabrahm/ceres | vbt/vbtutils.py | 1 | 11062 | import sys
import matplotlib
matplotlib.use("Agg")
base = '../'
sys.path.append(base+"utils/GLOBALutils")
import GLOBALutils
import numpy as np
import scipy
from astropy.io import fits as pyfits
import os
import glob
import scipy.signal
from scipy.signal import medfilt
from scipy import interpolate
import copy
from pylab import *
def get_thar_offsets(lines_thar, order_dir='wavcals/', pref='order_', suf='.iwdat', ior=20,fior=45, delt_or=3, del_width=200.,binning=1):
xcs = []
for ii in range(ior,fior):
thar_order = lines_thar[ii]
xct = []
for order in range(ii-delt_or,ii+delt_or):
order_s = str(order)
if (order < 10):
order_s = '0' + order_s
if os.access(order_dir+pref+order_s+suf,os.F_OK):
f = open(order_dir+pref+order_s+suf,'r')
llins = f.readlines()
if len(llins)>5:
pixel_centers_0 = []
for line in llins:
w = line.split()
nlines = int(w[0])
for j in range(nlines):
pixel_centers_0.append(float(w[2*j+1])*1./float(binning))
pixel_centers_0 = np.array(pixel_centers_0).astype('int')
#plot(thar_order)
#plot(pixel_centers_0,thar_order[pixel_centers_0],'ro')
#print order, order_s
#show()
ml = np.array(pixel_centers_0) - 2
mh = np.array(pixel_centers_0) + 2
if len(ml)>0:
xc,offs = GLOBALutils.XCorPix( thar_order, ml, mh, del_width=del_width)
else:
xc = np.zeros(len(offs))
else:
xc = np.array([])
if len(xct) == 0:
xct = xc.copy()
else:
xct = np.vstack((xct,xc))
if len(xcs) == 0:
xcs = xct.copy()
else:
xcs += xct
maxes, maxvels = [],[]
for i in range(xcs.shape[0]):
maxes.append(xcs[i].max())
maxvels.append(offs[np.argmax(xcs[i])])
#plot(offs,xcs[i])
#show()
maxes,maxvels = np.array(maxes),np.array(maxvels)
orders_offset = -delt_or + np.argmax(maxes)
rough_shift = maxvels[np.argmax(maxes)]
return orders_offset, rough_shift
def milk_comb(ImgList, darks, zero='Bias.fits'):
n = len(ImgList)
if n==0:
raise ValueError("empty list provided!")
h = pyfits.open(ImgList[0])[0]
d = h.data
head = pyfits.getheader(ImgList[0])
expt = head['EXPTIME']
d = OverscanTrim(d,h.header['BIASSEC'])
Master = pyfits.getdata(zero)
if len(darks) > 0:
Dark = get_dark(darks,expt)
else:
Dark = np.zeros((d.shape[0],d.shape[1]),float)
d -= Master
d -= Dark
factor = 1.25
if (n < 3):
factor = 1
ron1 = h.header['ENOISE']
gain = h.header['EGAIN']
ronoise = factor * h.header['ENOISE'] / np.sqrt(n)
if (n == 1):
return d, ron1, gain
else:
for i in range(n-1):
h = pyfits.open(ImgList[i+1])[0]
head = pyfits.getheader(ImgList[i+1])
expt = head['EXPTIME']
if len(darks) > 0:
Dark = get_dark(darks,expt)
else:
Dark = np.zeros((d.shape[0],d.shape[1]),float)
rd = OverscanTrim(h.data,h.header['BIASSEC']) - Master - Dark
d = np.dstack((d,rd/np.median(rd)))
out = np.median(d,axis=2)
return out, ronoise, gain
def FileClassify(path,log):
biases = []
objects = []
darks = []
thars = []
flat = []
f = open(log,'w')
archs = glob.glob(path+'*.fits')
if os.access(path+'bad_files.txt',os.F_OK):
ff = open(path + 'bad_files.txt', 'r')
bfiles = ff.readlines()
else:
bfiles = []
for arch in archs:
use = True
for bf in bfiles:
if arch == path + bf[:-1]:
print 'Dumped file', arch
use = False
break
if use:
h = pyfits.open(arch)
header = pyfits.getheader(arch)
name = header['OBJECT']
if True:
if header['IMAGETYP'] == 'object' or header['IMAGETYP'] == 'obj' or header['IMAGETYP'] == 'solar':
expt = header['EXPTIME']
try:
ra = header['TELRA']
except:
ra = header['RA']
try:
dec = header['TELDEC']
except:
dec = header['DEC']
ams = header['AIRMASS']
date = header['DATE-OBS'].split('T')[0]
UT = header['DATE-OBS'].split('T')[1]
line = "%-15s %10s %10s %8.2f %4.2f %8s %8s %s\n" % (name, ra, dec, expt, ams, date, UT, arch)
f.write(line)
objects.append(arch)
elif header['IMAGETYP'] == 'comp':
thars.append(arch)
elif header['IMAGETYP'] == 'zero':
biases.append(arch)
elif header['IMAGETYP'] == 'flat':
flat.append(arch)
elif header['IMAGETYP'] == 'dark':
darks.append(arch)
h.close()
f.close()
return biases, flat, objects, thars, darks
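def _example_file_classify_usage():
    """Illustrative sketch only: '/data/vbt/night1/' and 'night1.log' are
    hypothetical paths. FileClassify sorts the FITS files found in the path
    by IMAGETYP and writes a one-line summary per science frame to the log."""
    biases, flats, objects, thars, darks = FileClassify('/data/vbt/night1/', 'night1.log')
    return {'bias': len(biases), 'flat': len(flats), 'object': len(objects),
            'thar': len(thars), 'dark': len(darks)}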
def MedianCombine(ImgList, zero_bo=False, zero='Bias.fits', dark_bo=False, darks=[], flat_bo=False, flat='Flat.fits',bsec=[0,50,4146,4196]):
"""
Median combine a list of images
"""
n = len(ImgList)
if n==0:
raise ValueError("empty list provided!")
h = pyfits.open(ImgList[0])[0]
d = h.data[0]
d = OverscanTrim(d,bsec)
if zero_bo:
Master = pyfits.getdata(zero)
else:
Master = np.zeros((d.shape[0],d.shape[1]),float)
if dark_bo and len(darks)!=0:
hd = pyfits.getheader(ImgList[0])
time = hd['EXPTIME']
Dark = get_dark(darks, time)
else:
Dark = np.zeros((d.shape[0],d.shape[1]),float)
if flat_bo:
Flat = pyfits.getdata(flat)
else:
Flat = np.zeros((d.shape[0],d.shape[1]),float) + 1.0
if flat_bo:
d = (d - Master - Dark)/Flat
else:
d = (d - Master - Dark)
factor = 1.25
if (n < 3):
factor = 1
ronoise = factor * h.header['RDNOISE'] / np.sqrt(n)
gain = h.header['GAIN']
if (n == 1):
return d, ronoise, gain
else:
for i in range(n-1):
h = pyfits.open(ImgList[i+1])[0]
if flat_bo:
d = np.dstack((d,(OverscanTrim(h.data[0],bsec)-Master-Dark)/Flat))
else:
d = np.dstack((d,OverscanTrim(h.data[0],bsec)-Master-Dark))
return np.median(d,axis=2), ronoise, gain
def OverscanTrim(d,bsec,bin=1.):
"""
Overscan correct and Trim a refurbished DuPont image
"""
t1 = d[:,bsec[0]:bsec[1]]
t2 = d[:,bsec[2]:bsec[3]]
nd = d[:,bsec[1]:bsec[2]].copy()
overscan1 = np.median(np.dstack((t1,t2)))
newdata = nd - overscan1
return newdata
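def _example_overscan_trim_usage():
    """Illustrative sketch only (synthetic data): bsec gives column limits
    [os1_start, data_start, data_end, os2_end], so the returned frame keeps
    columns bsec[1]:bsec[2] minus the median level of the two overscan strips."""
    frame = np.ones((100, 200), float) * 500.0
    frame[:, :10] = 100.0    # left overscan strip
    frame[:, 190:] = 100.0   # right overscan strip
    trimmed = OverscanTrim(frame, [0, 10, 190, 200])
    # trimmed has shape (100, 180) and every value equals 500 - 100 = 400
    return trimmed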
def get_dark(darks,t):
exact = 0
dts = []
for dark in darks:
hd = pyfits.getheader(dark)
dt = hd['EXPTIME']
dts.append(dt)
if dt == t:
DARK = pyfits.getdata(dark)
exact = 1
dts = np.array(dts)
if exact == 0:
if t < dts.min():
I = np.where( dts == dts.min() )[0]
DARK = pyfits.getdata(darks[I[0]])*t/dts[I[0]]
elif t > dts.max():
I = np.where( dts == dts.max() )[0]
DARK = pyfits.getdata(darks[I[0]])*t/dts[I[0]]
else:
tmin = dts.min()
tmax = dts.max()
I = np.where( dts == dts.min() )[0]
Dmin = pyfits.getdata(darks[I[0]])
Dminname=darks[I[0]]
I = np.where( dts == dts.max() )[0]
Dmax = pyfits.getdata(darks[I[0]])
Dmaxname = darks[I[0]]
i = 0
while i < len(dts):
if dts[i] < t and dts[i] > tmin:
tmin = dts[i]
Dminname = darks[i]
Dmin = pyfits.getdata(darks[i])
elif dts[i] > t and dts[i] < tmax:
tmax = dts[i]
Dmaxname = darks[i]
Dmax = pyfits.getdata(darks[i])
i+=1
num = Dmax - Dmin
den = tmax-tmin
m = num/den
n = Dmax - m*tmax
DARK = m*t+n
return DARK
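def _example_get_dark_usage():
    """Illustrative sketch only: the dark-frame file names are hypothetical and
    must exist on disk with an EXPTIME keyword in their headers. For a 45 s
    science exposure, get_dark() interpolates pixel by pixel between the
    bracketing 10 s and 60 s master darks."""
    darks = ['dark_10s.fits', 'dark_60s.fits', 'dark_120s.fits']
    DARK = get_dark(darks, 45.0)
    return DARK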
def get_blaze(LL,FF, low=1.0, hi=3.0, n = 6):
NF = FF.copy()
for j in range(LL.shape[0]):
L = LL[j]
F = FF[j]
ejex = np.arange(len(F))
F[:150] = 0.0
F[-150:] = 0.0
Z = np.where(F!=0)[0]
F = scipy.signal.medfilt(F[Z],31)
ejexx = ejex.copy()
ejex = ejex[Z]
L = L[Z]
I = np.where((L>5870) & (L<5890))[0]
if len(I)>0:
W = np.where(L<5870)[0]
R = np.where(L>5890)[0]
ejetemp = np.hstack((ejex[W],ejex[R]))
Ftemp = np.hstack((F[W],F[R]))
coefs = np.polyfit(ejetemp,Ftemp,n)
fit = np.polyval(coefs,ejetemp)
else:
ejetemp=ejex
Ftemp=F
coefs = np.polyfit(ejex,F,n)
fit = np.polyval(coefs,ejex)
i = 0
while i < 30:
res = Ftemp - fit
IP = np.where((res>=0) & (Ftemp!=0.0))[0]
IN = np.where((res<0) & (Ftemp!=0.0))[0]
devp = np.mean(res[IP])
devn = np.mean(res[IN])
I = np.where((res > -low*abs(devn)) & (res < hi*abs(devp)) & (Ftemp!=0))[0]
coefs = np.polyfit(ejetemp[I],Ftemp[I],n)
fit = np.polyval(coefs,ejetemp)
i+=1
fit = np.polyval(coefs,ejexx)
NF[j]=fit
NNF = NF.copy()
for j in range(LL.shape[0]):
L = LL[j]
I = np.where((L>6520) & (L<6600))[0]
if len(I)>0:
if j+2 < LL.shape[0]:
for i in range(len(L)):
vec = np.array([NF[j-2,i],NF[j-1,i],NF[j+1,i],NF[j+2,i]])
tck = scipy.interpolate.splrep(np.array([0.0,1.0,3.0,4.0]),vec,k=2)
NNF[j,i] = scipy.interpolate.splev(2.0,tck,der=0)
elif j+1 < LL.shape[0]:
for i in range(len(L)):
vec = np.array([NF[j-2,i],NF[j-1,i],NF[j+1,i]])
tck = scipy.interpolate.splrep(np.array([0.0,1.0,3.0]),vec,k=1)
NNF[j,i] = scipy.interpolate.splev(2.0,tck,der=0)
elif j < LL.shape[0]:
for i in range(len(L)):
vec = np.array([NF[j-3,i],NF[j-2,i],NF[j-1,i]])
tck = scipy.interpolate.splrep(np.array([0.0,1.0,2.0]),vec,k=1)
NNF[j,i] = scipy.interpolate.splev(3.0,tck,der=0)
I = np.where((L>4870) & (L<4880))[0]
if len(I)>0:
if j+2 < LL.shape[0]:
for i in range(len(L)):
vec = np.array([NF[j-2,i],NF[j-1,i],NF[j+1,i],NF[j+2,i]])
tck = scipy.interpolate.splrep(np.array([0.0,1.0,3.0,4.0]),vec,k=2)
NNF[j,i] = scipy.interpolate.splev(2.0,tck,der=0)
elif j+1 < LL.shape[0]:
for i in range(len(L)):
vec = np.array([NF[j-2,i],NF[j-1,i],NF[j+1,i]])
tck = scipy.interpolate.splrep(np.array([0.0,1.0,3.0]),vec,k=1)
NNF[j,i] = scipy.interpolate.splev(2.0,tck,der=0)
else:
for i in range(len(L)):
vec = np.array([NF[j-3,i],NF[j-2,i],NF[j-1,i]])
tck = scipy.interpolate.splrep(np.array([0.0,1.0,2.0]),vec,k=1)
NNF[j,i] = scipy.interpolate.splev(3.0,tck,der=0)
I = np.where((L>4320) & (L<4325))[0]
if len(I)>0:
if j+2 < LL.shape[0]:
for i in range(len(L)):
vec = np.array([NF[j-2,i],NF[j-1,i],NF[j+1,i],NF[j+2,i]])
tck = scipy.interpolate.splrep(np.array([0.0,1.0,3.0,4.0]),vec,k=2)
NNF[j,i] = scipy.interpolate.splev(2.0,tck,der=0)
return NNF
def get_close(tht,rat,dect,fits):
t0 = 1000000.
close = fits[0]
for fit in fits:
#print close
hd = pyfits.getheader(fit)
sct,mjd0 = mjd_fromheader(hd)
expt = hd['EXPTIME']/(3600.*24.)
dec = hd['DEC-D']
ra = hd['RA-D']
if abs(dec - dect)<0.05 and abs(ra - rat)<0.05:
#print sct+expt,tht
if abs(sct+expt-tht) < t0:
t0 = abs(sct+expt-tht)
close = fit
return close
def b_col(d):
d[:,746] = 0.5*(d[:,745]+d[:,748])
d[:,747] = 0.5*(d[:,745]+d[:,748])
return d
def mjd_fromheader(h):
"""
return modified Julian date from header
"""
datetu = h['DATE-OBS'].split('T')[0]
timetu = h['DATE-OBS'].split('T')[1]
mjd0,mjd,i = GLOBALutils.iau_cal2jd(int(datetu[:4]),int(datetu[5:7]),int(datetu[8:]))
ho = int(timetu[:2])
mi = int(timetu[3:5])
se = float(timetu[7:])
ut = float(ho) + float(mi)/60.0 + float(se)/3600.0
mjd_start = mjd + ut/24.0
secinday = 24*3600.0
fraction = 0.5
texp = h['EXPTIME'] #sec
mjd = mjd_start + (fraction * texp) / secinday
return mjd, mjd0
| mit |
doncat99/StockRecommendSystem | Source/FetchData/Fetch_Data_Stock_US_Short.py | 1 | 3659 | import os, requests, time, datetime, configparser, warnings
from bs4 import BeautifulSoup
import pandas as pd
from Fetch_Data_Stock_US_Daily import getStocksList
import concurrent.futures
from tqdm import tqdm
def getSignleStockShortInfo(stock):
df = pd.DataFrame()
url = "http://shortsqueeze.com/?symbol=" + stock + "&submit=Short+Quote%E2%84%A2"
repeat_times = 3
downloadFailed = True
for _ in range(repeat_times):
try:
response = requests.get(url, timeout=15)
downloadFailed = False
break
except Exception as e:
print ("exception in get stock:" + stock, str(e))
continue
if downloadFailed:
return "", df
try:
tables = pd.read_html(response.text, attrs={'cellpadding': '3', 'width': '100%'})
except Exception as e:
print ("exception in parse stock:" + stock, str(e))
return "", df
for table in tables:
if df.empty:
df = table
else:
df = pd.concat([df, table])
    df.reset_index(drop=True, inplace=True)
#print(df)
soup = BeautifulSoup(response.text, 'lxml')
dateString = soup.find('span', {"style" : "color:#999999;font-family: verdana, arial, helvetica;font-size:10px"}).get_text()
date = datetime.datetime.strptime(dateString, '%A %B %d, %Y')
return date, df.T
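def _example_short_info_usage():
    """Illustrative sketch only: requires network access to shortsqueeze.com;
    'AAPL' is just an example ticker, any listed symbol works."""
    date, df = getSignleStockShortInfo('AAPL')
    if date != "" and not df.empty:
        print(date.strftime("%Y-%m-%d"), df.shape)
    return date, df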
def updateStockShortData_US():
Config = configparser.ConfigParser()
Config.read("../../config.ini")
dir = Config.get('Paths', 'SHORT_US')
if os.path.exists(dir) == False:
os.makedirs(dir)
stocklist = getStocksList()['symbol'].values.tolist()
symbols = stocklist
pbar = tqdm(total=len(symbols))
log_errors = []
log_update = []
# debug only
short_df = pd.DataFrame()
for stock in symbols:
startTime = time.time()
date, df = getSignleStockShortInfo(stock)
if short_df.empty:
short_df = df
else:
short_df = pd.concat([short_df, df])
outMessage = '%-*s fetched in: %.4s seconds' % (6, stock, (time.time() - startTime))
pbar.set_description(outMessage)
pbar.update(1)
short_df.to_csv(dir+date.strftime("%Y-%m-%d")+".csv")
# with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
# # Start the load operations and mark each future with its URL
# future_to_stock = {executor.submit(updateSingleStockData, dir, symbol): symbol for symbol in symbols}
# for future in concurrent.futures.as_completed(future_to_stock):
# stock = future_to_stock[future]
# try:
# startTime, message = future.result()
# except Exception as exc:
# startTime = time.time()
# log_errors.append('%r generated an exception: %s' % (stock, exc))
# len_errors = len(log_errors)
# if len_errors % 5 == 0: print(log_errors[(len_errors-5):])
# else:
# if len(message) > 0: log_update.append(message)
# outMessage = '%-*s fetched in: %.4s seconds' % (6, stock, (time.time() - startTime))
# pbar.set_description(outMessage)
# pbar.update(1)
pbar.close()
# if len(log_errors) > 0:
# print(log_errors)
# if len(log_update) > 0:
# print(log_update)
return symbols
if __name__ == "__main__":
pd.set_option('precision', 3)
pd.set_option('display.width',1000)
warnings.filterwarnings('ignore', category=pd.io.pytables.PerformanceWarning)
updateStockShortData_US() | mit |
olakiril/pipeline | python/pipeline/utils/quality.py | 5 | 4942 | import numpy as np
from sklearn.linear_model import TheilSenRegressor
from scipy import signal
def compute_quantal_size(scan):
""" Estimate the unit change in calcium response corresponding to a unit change in
pixel intensity (dubbed quantal size, lower is better).
Assumes images are stationary from one timestep to the next. Uses it to calculate a
measure of noise per bright intensity (which increases linearly given that imaging
noise is poisson), fits a line to it and uses the slope as the estimate.
:param np.array scan: 3-dimensional scan (image_height, image_width, num_frames).
:returns: int minimum pixel value in the scan (that appears a min number of times)
:returns: int maximum pixel value in the scan (that appears a min number of times)
:returns: np.array pixel intensities used for the estimation.
:returns: np.array noise variances used for the estimation.
:returns: float the estimated quantal size
:returns: float the estimated zero value
"""
# Set some params
num_frames = scan.shape[2]
min_count = num_frames * 0.1 # pixel values with fewer appearances will be ignored
max_acceptable_intensity = 3000 # pixel values higher than this will be ignored
    # Make sure field is at least 32 bits (int16 overflows if summed to itself)
scan = scan.astype(np.float32, copy=False)
# Create pixel values at each position in field
eps = 1e-4 # needed for np.round to not be biased towards even numbers (0.5 -> 1, 1.5 -> 2, 2.5 -> 3, etc.)
pixels = np.round((scan[:, :, :-1] + scan[:, :, 1:]) / 2 + eps)
pixels = pixels.astype(np.int16 if np.max(abs(pixels)) < 2 ** 15 else np.int32)
# Compute a good range of pixel values (common, not too bright values)
unique_pixels, counts = np.unique(pixels, return_counts=True)
min_intensity = min(unique_pixels[counts > min_count])
max_intensity = max(unique_pixels[counts > min_count])
max_acceptable_intensity = min(max_intensity, max_acceptable_intensity)
pixels_mask = np.logical_and(pixels >= min_intensity, pixels <= max_acceptable_intensity)
# Select pixels in good range
pixels = pixels[pixels_mask]
unique_pixels, counts = np.unique(pixels, return_counts=True)
# Compute noise variance
variances = ((scan[:, :, :-1] - scan[:, :, 1:]) ** 2 / 2)[pixels_mask]
pixels -= min_intensity
variance_sum = np.zeros(len(unique_pixels)) # sum of variances per pixel value
for i in range(0, len(pixels), int(1e8)): # chunk it for memory efficiency
variance_sum += np.bincount(pixels[i: i + int(1e8)], weights=variances[i: i + int(1e8)],
minlength=np.ptp(unique_pixels) + 1)[unique_pixels - min_intensity]
unique_variances = variance_sum / counts # average variance per intensity
# Compute quantal size (by fitting a linear regressor to predict the variance from intensity)
X = unique_pixels.reshape(-1, 1)
y = unique_variances
model = TheilSenRegressor() # robust regression
model.fit(X, y)
quantal_size = model.coef_[0]
zero_level = - model.intercept_ / model.coef_[0]
return (min_intensity, max_intensity, unique_pixels, unique_variances,
quantal_size, zero_level)
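def _example_compute_quantal_size_usage():
    """ Illustrative sketch only: uses a synthetic Poisson-distributed scan
    rather than real two-photon data, just to show the expected call. For
    ideal Poisson counts the estimated quantal size should be close to 1 and
    the zero level close to 0.
    """
    scan = np.random.poisson(lam=500, size=(64, 64, 200)).astype(np.float32)
    (min_intensity, max_intensity, pixels, variances,
     quantal_size, zero_level) = compute_quantal_size(scan)
    return quantal_size, zero_level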
def find_peaks(trace):
""" Find local peaks in the signal and compute prominence and width at half
prominence. Similar to Matlab's findpeaks.
:param np.array trace: 1-d signal vector.
:returns: np.array with indices for each peak.
:returns: list with prominences per peak.
:returns: list with width per peak.
"""
# Get peaks (local maxima)
peak_indices = signal.argrelmax(trace)[0]
# Compute prominence and width per peak
prominences = []
widths = []
for index in peak_indices:
# Find the level of the highest valley encircling the peak
for left in range(index - 1, -1, -1):
if trace[left] > trace[index]:
break
for right in range(index + 1, len(trace)):
if trace[right] > trace[index]:
break
contour_level = max(min(trace[left: index]), min(trace[index + 1: right + 1]))
# Compute prominence
prominence = trace[index] - contour_level
prominences.append(prominence)
# Find left and right indices at half prominence
half_prominence = trace[index] - prominence / 2
for k in range(index - 1, -1, -1):
if trace[k] <= half_prominence:
left = k + (half_prominence - trace[k]) / (trace[k + 1] - trace[k])
break
for k in range(index + 1, len(trace)):
if trace[k] <= half_prominence:
right = k - 1 + (half_prominence - trace[k - 1]) / (trace[k] - trace[k - 1])
break
# Compute width
width = right - left
widths.append(width)
return peak_indices, prominences, widths | lgpl-3.0 |
pkainz/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/test_localdot.py | 44 | 5013 | from __future__ import print_function
import nose
import unittest
import numpy as np
from theano.compat.six.moves import xrange
import theano
from .localdot import LocalDot
from ..test_matrixmul import SymbolicSelfTestMixin
class TestLocalDot32x32(unittest.TestCase, SymbolicSelfTestMixin):
channels = 3
bsize = 10 # batch size
imshp = (32, 32)
ksize = 5
nkern_per_group = 16
subsample_stride = 1
ngroups = 1
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
def setUp(self):
np.random.seed(234)
assert self.imshp[0] == self.imshp[1]
fModulesR = (self.imshp[0] - self.ksize + 1) // self.subsample_stride
#fModulesR += 1 # XXX GpuImgActs crashes w/o this??
fModulesC = fModulesR
self.fshape = (fModulesR, fModulesC, self.channels // self.ngroups,
self.ksize, self.ksize, self.ngroups, self.nkern_per_group)
self.ishape = (self.ngroups, self.channels // self.ngroups,
self.imshp[0], self.imshp[1], self.bsize)
self.hshape = (self.ngroups, self.nkern_per_group, fModulesR, fModulesC,
self.bsize)
filters = theano.shared(self.rand(self.fshape))
self.A = LocalDot(filters, self.imshp[0], self.imshp[1],
subsample=(self.subsample_stride, self.subsample_stride))
self.xlval = self.rand((self.hshape[-1],) + self.hshape[:-1])
self.xrval = self.rand(self.ishape)
self.xl = theano.shared(self.xlval)
self.xr = theano.shared(self.xrval)
# N.B. the tests themselves come from SymbolicSelfTestMixin
class TestLocalDotLargeGray(TestLocalDot32x32):
channels = 1
bsize = 128
imshp = (256, 256)
ksize = 9
nkern_per_group = 16
subsample_stride = 2
ngroups = 1
n_patches = 3000
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
# not really a test, but important code to support
# Currently exposes error, by e.g.:
# CUDA_LAUNCH_BLOCKING=1
# THEANO_FLAGS=device=gpu,mode=DEBUG_MODE
# nosetests -sd test_localdot.py:TestLocalDotLargeGray.run_autoencoder
def run_autoencoder(
self,
n_train_iter=10000, # -- make this small to be a good unit test
rf_shape=(9, 9),
n_filters=1024,
dtype='float32',
module_stride=2,
lr=0.01,
show_filters=True,
):
if show_filters:
# import here to fail right away
import matplotlib.pyplot as plt
try:
import skdata.vanhateren.dataset
except ImportError:
raise nose.SkipTest()
# 1. Get a set of image patches from the van Hateren data set
print('Loading van Hateren images')
n_images = 50
vh = skdata.vanhateren.dataset.Calibrated(n_images)
patches = vh.raw_patches((self.n_patches,) + self.imshp,
items=vh.meta[:n_images],
rng=np.random.RandomState(123),
)
patches = patches.astype('float32')
patches /= patches.reshape(self.n_patches, self.imshp[0] * self.imshp[1])\
.max(axis=1)[:, None, None]
# TODO: better local contrast normalization
if 0 and show_filters:
plt.subplot(2, 2, 1); plt.imshow(patches[0], cmap='gray')
plt.subplot(2, 2, 2); plt.imshow(patches[1], cmap='gray')
plt.subplot(2, 2, 3); plt.imshow(patches[2], cmap='gray')
plt.subplot(2, 2, 4); plt.imshow(patches[3], cmap='gray')
plt.show()
# -- Convert patches to localdot format:
# groups x colors x rows x cols x images
patches5 = patches[:, :, :, None, None].transpose(3, 4, 1, 2, 0)
print('Patches shape', patches.shape, self.n_patches, patches5.shape)
# 2. Set up an autoencoder
print('Setting up autoencoder')
hid = theano.tensor.tanh(self.A.rmul(self.xl))
out = self.A.rmul_T(hid)
cost = ((out - self.xl) ** 2).sum()
params = self.A.params()
gparams = theano.tensor.grad(cost, params)
train_updates = [(p, p - lr / self.bsize * gp)
for (p, gp) in zip(params, gparams)]
if 1:
train_fn = theano.function([], [cost], updates=train_updates)
else:
train_fn = theano.function([], [], updates=train_updates)
theano.printing.debugprint(train_fn)
# 3. Train it
params[0].set_value(0.001 * params[0].get_value())
for ii in xrange(0, self.n_patches, self.bsize):
self.xl.set_value(patches5[:, :, :, :, ii:ii + self.bsize], borrow=True)
cost_ii, = train_fn()
print('Cost', ii, cost_ii)
if 0 and show_filters:
self.A.imshow_gray()
plt.show()
assert cost_ii < 0 # TODO: determine a threshold for detecting regression bugs
| bsd-3-clause |
NelisVerhoef/scikit-learn | sklearn/tests/test_multiclass.py | 136 | 23649 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: tests is the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs and thus 3 distinct
        # binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
APPIAN-PET/APPIAN | src/utils.py | 1 | 8440 | import os
import re
import gzip
import shutil
import subprocess
import nibabel as nib
import ntpath
import pandas as pd
import numpy as np
import tempfile
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath, CommandLine, CommandLineInputSpec,
BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)
def nib_load_3d(fn):
img = nib.load(fn)
vol = img.get_data()
vol = vol.reshape(vol.shape[0:3])
img_3d = nib.Nifti1Image(vol, img.affine)
return img_3d
def cmd(command):
try:
output = subprocess.check_output(command,stderr=subprocess.STDOUT, shell=True, universal_newlines=True)
except subprocess.CalledProcessError as exc:
print("Status : FAIL", exc.returncode, exc.output)
exit(1)
else:
print("Output: \n{}\n".format(output))
def splitext(s):
try :
ssplit = os.path.basename(s).split('.')
ext='.'+'.'.join(ssplit[1:])
basepath= re.sub(ext,'',s)
return [basepath, ext]
except TypeError :
return s
def gz(ii, oo):
with open(ii, 'rb') as in_file:
with gzip.open(oo, 'wb') as out_file:
shutil.copyfileobj(in_file, out_file)
def gunzip(ii, oo):
with gzip.open(ii, 'rb') as in_file:
with open(oo, 'wb') as out_file:
shutil.copyfileobj(in_file, out_file)
def check_gz(in_file_fn) :
img, ext = splitext(in_file_fn)
if '.gz' in ext :
out_file_fn = tempfile.mkdtemp() + os.path.basename(img) + '.nii'
sif = img + '.sif'
if os.path.exists(sif) :
shutil.copy(sif, '/tmp/'+os.path.basename(img)+'.sif' )
gunzip(in_file_fn, out_file_fn)
return out_file_fn
else :
return in_file_fn
class separate_mask_labelsOutput(TraitedSpec):
out_file=traits.File(argstr="%s", desc="4D label image")
class separate_mask_labelsInput(TraitedSpec):
in_file=traits.File(argstr="%s", desc="3D label image")
out_file=traits.File(argstr="%s", desc="4D label image")
class separate_mask_labelsCommand(BaseInterface ):
input_spec = separate_mask_labelsInput
output_spec = separate_mask_labelsOutput
def _run_interface(self, runtime):
vol = nib.load(self.inputs.in_file)
data = vol.get_data()
data = data.reshape(*data.shape[0:3])
if not isdefined(self.inputs.out_file) :
self.inputs.out_file = self._gen_outputs(self.inputs.in_file)
unique = np.unique( data ).astype(int)
nUnique = len(unique)-1
out = np.zeros( [data.shape[0], data.shape[1], data.shape[2], nUnique] )
print('unique', unique)
print('shape',out.shape)
print('data', data.shape)
for t,i in enumerate( unique ) :
if i != 0 :
print(t-1, i )
out[ data == i, t-1 ] = 1
out_file=nib.Nifti1Image(out, vol.get_affine(), vol.header)
out_file.to_filename(self.inputs.out_file)
return(runtime)
def _gen_outputs(self, fn) :
fn_split = splitext(fn)
return os.getcwd() + os.sep + os.path.basename( fn_split[0] ) + "_4d" + fn_split[1]
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.out_file) :
self.inputs.out_file = self._gen_outputs(self.inputs.in_file)
outputs["out_file"] = self.inputs.out_file
return outputs
class concat_dfOutput(TraitedSpec):
out_file = traits.File(desc="Output file")
class concat_dfInput(BaseInterfaceInputSpec):
in_list = traits.List(mandatory=True, exists=True, desc="Input list")
out_file = traits.File(mandatory=True, desc="Output file")
test = traits.Bool(default=False, usedefault=True, desc="Flag for if df is part of test run of pipeline")
class concat_df(BaseInterface):
input_spec = concat_dfInput
output_spec = concat_dfOutput
def _run_interface(self, runtime):
df=pd.DataFrame([])
test = self.inputs.test
for f in self.inputs.in_list:
dft = pd.read_csv(f)
df = pd.concat([df, dft], axis=0)
#if test : print df
df.to_csv(self.inputs.out_file, index=False)
return(runtime)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["out_file"] = os.getcwd() + os.sep + self.inputs.out_file
return outputs
class ConcatOutput(TraitedSpec):
out_file = File(exists=True, desc="resampled image")
class ConcatInput(CommandLineInputSpec):
in_file = InputMultiPath(File(mandatory=True), position=0, argstr='%s', desc='List of input images.')
out_file = File(position=1, argstr="%s", mandatory=True, desc="Output image.")
dimension = traits.Str(argstr="-concat_dimension %s", desc="Concatenate along a given dimension.")
start = traits.Float(argstr="-start %s", desc="Starting coordinate for new dimension.")
step = traits.Float(argstr="-step %s", desc="Step size for new dimension.")
clobber = traits.Bool(argstr="-clobber", usedefault=True, default_value=True, desc="Overwrite output file")
verbose = traits.Bool(argstr="-verbose", usedefault=True, default_value=True, desc="Write messages indicating progress")
class copyOutput(TraitedSpec):
output_file=traits.File(argstr="%s", desc="input")
class copyInput(TraitedSpec):
input_file=traits.File(argstr="%s", desc="input")
output_file=traits.File(argstr="%s", desc="output")
class copyCommand(BaseInterface ):
input_spec = copyInput
output_spec = copyOutput
def _run_interface(self, runtime):
if not isdefined(self.inputs.output_file) :
self.inputs.output_file = self._gen_output(self.inputs.input_file)
shutil.copy(self.inputs.input_file, self.inputs.output_file)
return(runtime)
def _gen_output(self, fn) :
return os.getcwd() + os.sep + os.path.basename( fn )
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.output_file) :
self.inputs.output_file = self._gen_output(self.inputs.input_file)
outputs["output_file"] = self.inputs.output_file
return outputs
# In theory, this information should be contained in the header.
# Often, however, the information will either not be present in the header or it will be saved under an unexpected variable name (e.g., "Patient_Weight", "body_weight", "weight").
# One way around this problem is to allow the user to create a .csv file with the subject
# name and the parameter of interest. This way, if the parameter cannot be read from the header, it can still be read from the text file.
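# For example, a hypothetical two-column parameter file (no header row), matching
# the pd.read_csv(parameter_name, header=None) call in subject_parameterCommand below:
#   sub-01,75.0
#   sub-02,68.2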
class subject_parameterOutput(TraitedSpec):
parameter=traits.String(argstr="%s", desc="Subject parameter")
class subject_parameterInput(TraitedSpec):
parameter_name=traits.String(argstr="%s", desc="File containing subject parameters")
header = traits.Dict(desc="Python dictionary containing PET header")
parameter=traits.String(argstr="%s", desc="Subject parameter")
sid=traits.String(desc="Subject ID")
class subject_parameterCommand(BaseInterface ):
input_spec = subject_parameterInput
output_spec = subject_parameterOutput
def _run_interface(self, runtime):
parameter_name = self.inputs.parameter_name
header = self.inputs.header
sid = self.inputs.sid
if os.path.exists(parameter_name):
            # Case 1: parameter_name is a file containing subject IDs and parameter values
            # --> read the value for this subject from the file
df=pd.read_csv(parameter_name, header=None)
parameter=df.iloc[:, 1][ df.iloc[:,0] == sid ].values[0]
#Case 2: parameter_name is a string representing the name of the parameter
else:
parameter=_finditem(header, parameter_name)
if type(parameter) == list:
parameter=parameter[0]
#convert scientific notation number to floating point number, stored as string
try:
parameter=format(float(parameter), 'f')
except ValueError: pass
self.inputs.parameter=str(parameter)
return(runtime)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["parameter"] = self.inputs.parameter
return outputs
| mit |
tracierenea/gnuradio | gr-filter/examples/chirp_channelize.py | 58 | 7169 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 200000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._fs, 500, 20,
attenuation_dB=10,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
repeated = True
if(repeated):
self.vco_input = analog.sig_source_f(self._fs, analog.GR_SIN_WAVE, 0.25, 110)
else:
amp = 100
data = scipy.arange(0, amp, amp/float(self._N))
self.vco_input = blocks.vector_source_f(data, False)
# Build a VCO controlled by either the sinusoid or single chirp tone
# Then convert this to a complex signal
self.vco = blocks.vco_f(self._fs, 225, 1)
self.f2c = blocks.float_to_complex()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.vco_input, self.vco, self.f2c)
self.connect(self.f2c, self.head, self.pfb)
self.connect(self.f2c, self.snk_i)
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
fig3 = pylab.figure(4, figsize=(16,9), facecolor="w")
Ns = 650
Ne = 20000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = freq
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
sp3 = fig3.add_subplot(1,1,1)
p3 = sp3.plot(t_o, x_o.real)
sp3.set_xlim([min(t_o), max(t_o)+1])
sp3.set_ylim([-2, 2])
sp3.set_title("All Channels")
sp3.set_xlabel("Time (s)")
sp3.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/calibration/plot_compare_calibration.py | 1 | 5011 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
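# Illustrative sketch (not part of the original example): a rough manual
# equivalent of what calibration_curve computes below with uniform bins --
# the empirical fraction of positives versus the mean predicted probability
# in each bin. The bin layout and the helper name are choices of this sketch.
def manual_reliability(y_true, prob_pos, n_bins=10):
    bins = np.linspace(0., 1., n_bins + 1)
    bin_ids = np.digitize(prob_pos, bins[1:-1])  # bin index for each prediction
    frac_pos, mean_pred = [], []
    for b in range(n_bins):
        mask = bin_ids == b
        if mask.any():
            frac_pos.append(y_true[mask].mean())     # observed frequency of class 1
            mean_pred.append(prob_pos[mask].mean())  # average predicted probability
    return np.array(frac_pos), np.array(mean_pred)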
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name,))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| mit |
hsiaoyi0504/scikit-learn | examples/plot_multilabel.py | 87 | 4279 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
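# Illustrative sketch (not the actual make_multilabel_classification code) of the
# generative process described in the module docstring. The rejection thresholds
# and the way per-class word distributions are combined for multi-label documents
# are assumptions of this sketch.
def sample_document(theta, theta_c, n_labels=1, length=50, rng=None):
    rng = rng if rng is not None else np.random.RandomState(0)
    n_classes = len(theta)
    # pick the number of labels: n ~ Poisson(n_labels), rejecting unusable draws
    n = rng.poisson(n_labels)
    while n < 1 or n > n_classes:
        n = rng.poisson(n_labels)
    # n times, choose a class c ~ Multinomial(theta), rejecting repeated classes
    labels = set()
    while len(labels) < n:
        labels.add(int(rng.choice(n_classes, p=theta)))
    # pick the document length: k ~ Poisson(length), rejecting empty documents
    k = rng.poisson(length)
    while k == 0:
        k = rng.poisson(length)
    # k times, choose a word w ~ Multinomial(theta_c); here the chosen classes'
    # word distributions are simply averaged
    word_dist = np.mean([theta_c[c] for c in sorted(labels)], axis=0)
    words = rng.choice(len(word_dist), size=k, p=word_dist)
    return sorted(labels), words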
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
xiaoxiamii/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
fxb22/BioGUI | plugins/Views/GEViewPlugins/HeatMap.py | 1 | 4003 | import wx
import plotter as mpl
import numpy as np
import matplotlib.pyplot as plt
class Plugin():
def OnSize(self):
self.bPSize = self.coverPanel.GetSize()
self.plotter.Show(False)
self.plotter.SetSize((self.bPSize[1], self.bPSize[1]))
self.plotter.SetPosition(((self.bPSize[0]-self.bPSize[1])/2, 0))
self.plotter.resize([3.05 / 244, 3.05 / 244])
self.plotter.Show(True)
self.DoDraw(wx.EVT_IDLE)
def GetParamList(self):
return self.rec
def Clear(self):
for o in self.coverPanel.GetChildren():
o.Show(False)
o.Destroy()
def GetGeMat(self):
return self.geMat
def GetExec(self, f, coverPanel, rec, geMat, colorList):
self.rec = rec
self.coverPanel = coverPanel
self.geMat = geMat
self.bPSize = self.coverPanel.GetSize()
self.plotter = mpl.PlotNotebook(self.coverPanel,
size = (self.bPSize[1], self.bPSize[1] * 1.08),
pos = ((self.bPSize[0]-self.bPSize[1])/2,
-self.bPSize[1] * .08))
self.plotter.Show(True)
self.axes1 = self.plotter.add('figure 1').gca()
colMat = GetExec(self.rec, geMat, 'tri')
self.Z3 = np.transpose(np.array(colMat))
plt.spectral()
self.DoDraw()
return self.geMat
def DoDraw(self):
self.axes1.pcolor(self.Z3, vmin=-.9, vmax=1.1)
lab = []
for r in self.rec:
for a in r:
lab.append(a)
self.axes1.set_xticks(np.arange(self.Z3.shape[0])+0.5, minor=False)
self.axes1.set_xticklabels(lab, rotation = -20,
size = int(self.bPSize[1] * .02))
self.axes1.set_yticks([],False)
self.axes1.set_xlim(0,len(self.geMat)-1)
self.axes1.set_ylim(0,len(self.geMat[0]))
self.plotter.Show(True)
self.plotter.resize([3.05 / 244, 3.05 / 244])
def BitCompare(meanIn, meanOut):
colMat = []
for i,m in enumerate(meanIn):
if m >= meanOut[i]:
colMat.append(1)
else:
colMat.append(0)
return colMat
def TempMat(meanIn, meanOut, inMat, outMat):
ti = inMat
to = outMat
i = 0
j = len(ti) - 1
for check,val in enumerate(meanIn):
if val > meanOut[check]:
for q,row in enumerate(ti):
row[i] = inMat[q][check]
for q,row in enumerate(to):
row[i] = outMat[q][check]
i += 1
else:
for q,row in enumerate(ti):
row[j] = inMat[q][check]
for q,row in enumerate(to):
row[j] = outMat[q][check]
j -= 1
tempMat = []
for row in ti:
tempMat.append(row)
for row in to:
tempMat.append(row)
return np.array(tempMat)
def TriCompare(Z):
colMat = Z
meanZ = np.mean(Z, dtype=np.float64, axis=0)
stdZ = np.std(Z, dtype=np.float64, axis=0)
for i,row in enumerate(Z):
for j,val in enumerate(row):
if ((val - meanZ[j]) / stdZ[j]) > -.5:
if ((val - meanZ[j]) / stdZ[j]) > .5:
colMat[i][j] = 1
else:
colMat[i][j] = -1
else:
colMat[i][j] = 0
i += 1
return colMat
def GetExec(rec, geMat, comp):
inMat = []
outMat = []
for row in geMat[:-1]:
if row[0] in rec[0]:
inMat.append(row[1:])
else:
outMat.append(row[1:])
t = np.array(inMat)
meanIn = np.mean(t, dtype=np.float64, axis=0)
meanOut = np.mean(np.array(outMat), dtype=np.float64, axis=0)
if comp == 'bit':
colMat = BitCompare(meanIn, meanOut)
elif comp == 'tri':
Z = TempMat(meanIn, meanOut, inMat, outMat)
colMat = TriCompare(Z)
return colMat
def GetName():
return "Heat Map"
| gpl-2.0 |
cajohnst/Optimized_FX_Portfolio | fxstreet_google_sheet.py | 1 | 3172 | import gspread
import pandas as pd
from oauth2client.service_account import ServiceAccountCredentials
import datetime
from datetime import date, timedelta
import os
import fxstreet_scraper
import StringIO
import csv
import settings as sv
on_heroku = False
if 'DYNO' in os.environ:
on_heroku = True
def main():
wks = setup_credentials()
if on_heroku:
update_spreadsheet(wks)
else:
request = raw_input('Enter Y to update the fxstreet spreadsheet: ')
if request is 'Y' or request is 'y':
update_spreadsheet(wks)
def setup_credentials():
scope = ['https://spreadsheets.google.com/feeds']
if on_heroku:
keyfile_dict = setup_keyfile_dict()
credentials = ServiceAccountCredentials.from_json_keyfile_dict(keyfile_dict, scope)
else:
credentials = ServiceAccountCredentials.from_json_keyfile_name('My Project-3b0bc29d35d3.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open_by_key("1GnVhFp0s28HxAEOP6v7kmfmt3yPL_TGJSV2mcn1RPMY").sheet1
return wks
def setup_keyfile_dict():
keyfile_dict = dict()
keyfile_dict['type'] = os.environ.get('TYPE')
keyfile_dict['client_email'] = os.environ.get('CLIENT_EMAIL')
keyfile_dict['private_key'] = unicode(os.environ.get('PRIVATE_KEY').decode('string_escape'))
keyfile_dict['private_key_id'] = os.environ.get('PRIVATE_KEY_ID')
keyfile_dict['client_id'] = os.environ.get('CLIENT_ID')
return keyfile_dict
def bootstrap_sheet(wks):
# If new spreadsheet, update current row indicator
if wks.acell('A1').value == '':
wks.update_acell('A1', 2)
def update_spreadsheet(wks):
num_rows = wks.row_count
bootstrap_sheet(wks)
current_row = int(wks.acell('A1').value)
csv_data = fxstreet_scraper.main()
csv_data = StringIO.StringIO(csv_data)
csv_reader = csv.reader(csv_data)
for csv_index, row in enumerate(csv_reader):
        # are we in the first row of the csv data (i.e., the column names)?
if csv_index == 0:
# Check if we have an empty spreadsheet
if current_row > 2:
continue
# row contains actual data
else:
row[0] = row[0].split(' ')[0]
if current_row > num_rows:
wks.append_row(row)
num_rows += 1
else:
cell_list = wks.range('A' + str(current_row) + ':G' + str(current_row))
for row_index, data in enumerate(row):
cell_list[row_index].value = data
wks.update_cells(cell_list)
current_row += 1
wks.update_acell('A1', current_row)
def pull_data(num_days):
start_date = sv.end_date - timedelta(num_days)
wks = setup_credentials()
csv_file = wks.export(format='csv')
csv_buffer = StringIO.StringIO(csv_file)
fxstreet_data = pd.read_csv(csv_buffer, header=1, index_col=0, parse_dates=True, infer_datetime_format=True)
filtered_data = fxstreet_data.ix[start_date:sv.end_date]
return filtered_data
def increment_letter(letter, amount):
cur = ord(letter)
return chr(cur+amount)
if __name__ == "__main__":
main()
| mit |
nealchenzhang/Py4Invst | Backtest_Futures/data.py | 1 | 7656 | # -*- coding: utf-8 -*-
# data.py
from abc import ABCMeta, abstractmethod
import datetime
import os
import numpy as np
import pandas as pd
from Data.Futures_Data.MongoDB_Futures import df_fromMongoDB
from Backtest_Futures.event import MarketEvent
class DataHandler(object):
"""
DataHandler is an abstract base class providing an interface for
all subsequent (inherited) data handlers (both live and historic).
The goal of a (derived) DataHandler object is to output a generated
set of bars (OHLCVOI) for each symbol requested.
This will replace how a live strategy would function as current
market data would be sent "down the pipe". Thus a historic and live
system will be treated identically by the rest of the backtesting suite.
"""
__metaclass__ = ABCMeta
@abstractmethod
def get_latest_bar(self, symbol):
"""
Returns the last bar updated.
:param symbol:
:return:
"""
raise NotImplementedError("Should implement get_latest_bar()")
@abstractmethod
def get_latest_bars(self, symbol, N=1):
"""
Returns the last N bars updated.
:param symbol:
:param N:
:return:
"""
raise NotImplementedError("Should implement get_latest_bars()")
@abstractmethod
def get_latest_bar_datetime(self, symbol):
"""
Returns a Python datetime object for the last bar.
:param symbol:
:return:
"""
raise NotImplementedError("Should implement get_latest_bar_datetime()")
@abstractmethod
def get_latest_bar_value(self, symbol, val_type):
"""
Returns one of the Open, High, Low, Close, Volume or OI
from the last bar.
:param symbol:
:param val_type:
:return:
"""
raise NotImplementedError("Should implement get_latest_bar_value()")
@abstractmethod
def get_latest_bars_values(self, symbol, val_type, N=1):
"""
Returns the last N bar values from the
latest_symbol list, or N-k if less available.
:param symbol:
:param val_type:
:return:
"""
raise NotImplementedError("Should implement get_latest_bars_values()")
@abstractmethod
def update_bars(self):
"""
Pushes the latest bars to the bars_queue for each symbol
in a tuple OHLCVOP format: (datetime, Open, High, Low,
Close, Volume, OpenInterest).
:return:
"""
raise NotImplementedError("Should implement update_bars()")
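# Minimal usage sketch (not part of this module): how a backtest driver is
# expected to pump a DataHandler. The queue interface, the 'MARKET' event type
# and the 'Close' field name are assumptions of this illustration.
def _example_backtest_loop(bars, events, symbol):
    closes = []
    while bars.continue_backtest:
        bars.update_bars()               # appends one bar and queues a MarketEvent
        while not events.empty():
            event = events.get()         # a Strategy/Portfolio would react here
            if getattr(event, 'type', None) == 'MARKET':
                closes.append(bars.get_latest_bar_value(symbol, 'Close'))
    return closes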
class HistoricalMongoDataHandler(DataHandler):
"""
HistoricalMongoDataHandler is designed to read historical data for
each requested symbol from MongoDB and provide an interface to obtain
the "latest" bar in a manner identical to a live trading interface.
"""
def __init__(self, events, dbname, symbol_list):
"""
Initializes the historic data handler by requesting the Mongo
DataBase and a list of symbols.
        It is assumed that all database names are of the form
        "1min_CTP", "tick_CTP", or similar for other time periods.
:param events: The Event Queue.
:param dbname: The Database name for different time period.
:param symbol_list: A list of symbol strings
"""
self.events = events
self.dbname = dbname
self.symbol_list = symbol_list
self.symbol_data = {}
self.latest_symbol_data = {}
self.continue_backtest = True
self._retrieve_mongodb_data()
def _retrieve_mongodb_data(self):
"""
        Retrieves the data from MongoDB, converting each symbol's collection
        into a pandas DataFrame within a symbol dictionary.
        For this handler it is assumed that the database structure
        follows the layout expected by the Data directory modules.
:return:
"""
comb_index = None
for s in self.symbol_list:
# Load the data from symbol database for specific time period,
# indexed on datetime
self.symbol_data[s] = df_fromMongoDB(self.dbname, s)
# Combine the index to pad forward values
if comb_index is None:
comb_index = self.symbol_data[s].index
else:
                comb_index = comb_index.union(self.symbol_data[s].index)
# Set the latest symbol_data to None
self.latest_symbol_data[s] = []
# Reindex the dataframes
for s in self.symbol_list:
self.symbol_data[s] = self.symbol_data[s].\
reindex(index=comb_index, method='pad').iterrows()
def _get_new_bar(self, symbol):
"""
Returns the latest bar from the data feed.
:param symbol:
:return:
"""
for b in self.symbol_data[symbol]:
yield b
def get_latest_bar(self, symbol):
"""
Returns the last bar from the latest_symbol list.
:param symbol:
:return:
"""
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
print("That symbol is not available in the historical data set.")
raise
else:
return bars_list[-1]
def get_latest_bars(self, symbol, N=1):
"""
Returns the last N bars from the latest_symbol list,
or N-k if less available.
:param symbol:
:return:
"""
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
print("That symbol is not available in the historical data set.")
raise
else:
return bars_list[-N:]
def get_latest_bar_datetime(self, symbol):
"""
Returns a Python datetime object for the last bar.
:param symbol:
:return:
"""
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
print("That symbol is not available in the historical data set.")
raise
else:
return bars_list[-1][0]
def get_latest_bar_value(self, symbol, val_type):
"""
Returns one of the Open, High, Low, Close, Volume or OI
values from the pandas Bar series object.
:param symbol:
:param val_type:
:return:
"""
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
print("That symbol is not available in the historical data set.")
raise
else:
return getattr(bars_list[-1][1], val_type)
def get_latest_bars_values(self, symbol, val_type, N=1):
"""
Returns the last N bar values from the
latest_symbol list, or N-k if less available.
:param symbol:
:param val_type:
:param N:
:return:
"""
try:
bars_list = self.get_latest_bars(symbol, N)
except KeyError:
print("That symbol is not available in the historical data set.")
raise
else:
return np.array([getattr(b[1], val_type) for b in bars_list])
def update_bars(self):
"""
Pushes the latest bar to the latest_symbol_data structure
for all symbols in the symbol list.
"""
for s in self.symbol_list:
try:
bar = next(self._get_new_bar(s))
except StopIteration:
self.continue_backtest = False
else:
if bar is not None:
self.latest_symbol_data[s].append(bar)
self.events.put(MarketEvent()) | mit |
hainm/dask | dask/array/tests/test_percentiles.py | 8 | 1486 | import pytest
pytest.importorskip('numpy')
from dask.utils import skip
import dask.array as da
from dask.array.percentile import _percentile
import dask
import numpy as np
def eq(a, b):
if isinstance(a, da.Array):
a = a.compute(get=dask.get)
if isinstance(b, da.Array):
b = b.compute(get=dask.get)
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_percentile():
d = da.ones((16,), chunks=(4,))
assert eq(da.percentile(d, [0, 50, 100]), [1, 1, 1])
x = np.array([0, 0, 5, 5, 5, 5, 20, 20])
d = da.from_array(x, chunks=(3,))
assert eq(da.percentile(d, [0, 50, 100]), [0, 5, 20])
x = np.array(['a', 'a', 'd', 'd', 'd', 'e'])
d = da.from_array(x, chunks=(3,))
assert eq(da.percentile(d, [0, 50, 100]), ['a', 'd', 'e'])
@skip
def test_percentile_with_categoricals():
try:
import pandas as pd
except ImportError:
return
x0 = pd.Categorical(['Alice', 'Bob', 'Charlie', 'Dennis', 'Alice', 'Alice'])
x1 = pd.Categorical(['Alice', 'Bob', 'Charlie', 'Dennis', 'Alice', 'Alice'])
dsk = {('x', 0): x0, ('x', 1): x1}
x = da.Array(dsk, 'x', chunks=((6, 6),))
p = da.percentile(x, [50])
assert (p.compute().categories == x0.categories).all()
assert (p.compute().codes == [0]).all()
def test_percentiles_with_empty_arrays():
x = da.ones(10, chunks=((5, 0, 5),))
assert da.percentile(x, [10, 50, 90]).compute().tolist() == [1, 1, 1]
| bsd-3-clause |
rs2/pandas | pandas/tests/extension/test_integer.py | 2 | 7327 | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_extension_array_dtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension import base
def make_data():
return list(range(1, 9)) + [pd.NA] + list(range(10, 98)) + [pd.NA] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_for_twos(dtype):
return integer_array(np.ones(100) * 2, dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([pd.NA, 1], dtype=dtype)
@pytest.fixture
def data_for_sorting(dtype):
return integer_array([1, 2, 0], dtype=dtype)
@pytest.fixture
def data_missing_for_sorting(dtype):
return integer_array([1, pd.NA, 0], dtype=dtype)
@pytest.fixture
def na_cmp():
# we are pd.NA
return lambda x, y: x is pd.NA and y is pd.NA
@pytest.fixture
def na_value():
return pd.NA
@pytest.fixture
def data_for_grouping(dtype):
b = 1
a = 0
c = 2
na = pd.NA
return integer_array([b, b, na, na, a, a, b, c], dtype=dtype)
class TestDtype(base.BaseDtypeTests):
@pytest.mark.skip(reason="using multiple dtypes")
def test_is_dtype_unboxes_dtype(self):
# we have multiple dtypes, so skip
pass
class TestArithmeticOps(base.BaseArithmeticOpsTests):
def check_opname(self, s, op_name, other, exc=None):
# overwriting to indicate ops don't raise an error
super().check_opname(s, op_name, other, exc=None)
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
if s.dtype.is_unsigned_integer and (op_name == "__rsub__"):
# TODO see https://github.com/pandas-dev/pandas/issues/22023
pytest.skip("unsigned subtraction gives negative values")
if (
hasattr(other, "dtype")
and not is_extension_array_dtype(other.dtype)
and pd.api.types.is_integer_dtype(other.dtype)
):
# other is np.int64 and would therefore always result in
# upcasting, so keeping other as same numpy_dtype
other = other.astype(s.dtype.numpy_dtype)
result = op(s, other)
expected = s.combine(other, op)
if op_name in ("__rtruediv__", "__truediv__", "__div__"):
expected = expected.fillna(np.nan).astype(float)
if op_name == "__rtruediv__":
# TODO reverse operators result in object dtype
result = result.astype(float)
elif op_name.startswith("__r"):
# TODO reverse operators result in object dtype
# see https://github.com/pandas-dev/pandas/issues/22024
expected = expected.astype(s.dtype)
result = result.astype(s.dtype)
else:
# combine method result in 'biggest' (int64) dtype
expected = expected.astype(s.dtype)
pass
if (op_name == "__rpow__") and isinstance(other, pd.Series):
# TODO pow on Int arrays gives different result with NA
# see https://github.com/pandas-dev/pandas/issues/22022
result = result.fillna(1)
self.assert_series_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
@pytest.mark.skip(reason="intNA does not error on ops")
def test_error(self, data, all_arithmetic_operators):
# other specific errors tested in the integer array specific tests
pass
class TestComparisonOps(base.BaseComparisonOpsTests):
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
result = op(s, other)
# Override to do the astype to boolean
expected = s.combine(other, op).astype("boolean")
self.assert_series_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def check_opname(self, s, op_name, other, exc=None):
super().check_opname(s, op_name, other, exc=None)
def _compare_other(self, s, data, op_name, other):
self.check_opname(s, op_name, other)
class TestInterface(base.BaseInterfaceTests):
pass
class TestConstructors(base.BaseConstructorsTests):
pass
class TestReshaping(base.BaseReshapingTests):
pass
# for test_concat_mixed_dtypes test
# concat of an Integer and Int coerces to object dtype
# TODO(jreback) once integrated this would
class TestGetitem(base.BaseGetitemTests):
pass
class TestSetitem(base.BaseSetitemTests):
pass
class TestMissing(base.BaseMissingTests):
pass
class TestMethods(base.BaseMethodsTests):
@pytest.mark.skip(reason="uses nullable integer")
def test_value_counts(self, all_data, dropna):
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
expected.index = expected.index.astype(all_data.dtype)
self.assert_series_equal(result, expected)
class TestCasting(base.BaseCastingTests):
pass
class TestGroupby(base.BaseGroupbyTests):
pass
class TestNumericReduce(base.BaseNumericReduceTests):
def check_reduce(self, s, op_name, skipna):
# overwrite to ensure pd.NA is tested instead of np.nan
# https://github.com/pandas-dev/pandas/issues/30958
result = getattr(s, op_name)(skipna=skipna)
if not skipna and s.isna().any():
expected = pd.NA
else:
expected = getattr(s.dropna().astype("int64"), op_name)(skipna=skipna)
tm.assert_almost_equal(result, expected)
class TestBooleanReduce(base.BaseBooleanReduceTests):
pass
class TestPrinting(base.BasePrintingTests):
pass
class TestParsing(base.BaseParsingTests):
pass
| bsd-3-clause |
acuzzio/GridQuantumPropagator | Scripts/multiGraphEneDipole.py | 1 | 7791 | '''
This script collects energies and transition dipole matrices from several h5 files and
makes graphs.
It is a 1D module.
'''
from argparse import ArgumentParser
from collections import namedtuple
from itertools import repeat
import glob
import multiprocessing as mp
import numpy as np
from quantumpropagator import (retrieve_hdf5_data, makeJustAnother2Dgraph,
createHistogram, makeMultiLineDipoleGraph,
massOf, saveTraj, makeJustAnother2DgraphMULTI,
calcAngle, err)
import matplotlib.pyplot as plt
def read_single_arguments(single_inputs):
'''
    This function reads the command line arguments
'''
parser = ArgumentParser()
parser.add_argument("-n", "--globalPattern",
dest="n",
type=str,
required=True,
help="it is the global pattern of rassi h5 files")
parser.add_argument("-p", "--parallel",
dest="p",
type=int,
help="number of processors if you want it parallel")
parser.add_argument("-g", "--graphs",
dest="g",
action='store_true',
help="it creates coords 1d graphs")
parser.add_argument("-k", "--kinetic",
dest="k",
action='store_true',
help="enables kinetic analysis on 1d")
args = parser.parse_args()
if args.n is not None:
    single_inputs = single_inputs._replace(glob=args.n)
if args.p is not None:
    single_inputs = single_inputs._replace(proc=args.p)
if args.g:
    single_inputs = single_inputs._replace(graphs=args.g)
if args.k:
    single_inputs = single_inputs._replace(kin=args.k)
return single_inputs
single_inputs = namedtuple("single_input", ("glob","proc", "kin", "graphs"))
def graphMultiRassi(globalExp,poolSize):
''' collects rassi data and creates the elementwise graphs '''
allH5 = sorted(glob.glob(globalExp))
dime = len(allH5)
if dime == 0:
err("no files in {}".format(globalExp))
allH5First = allH5[0]
nstates = len(retrieve_hdf5_data(allH5First,'ROOT_ENERGIES'))
natoms = len(retrieve_hdf5_data(allH5First,'CENTER_LABELS'))
bigArray = np.empty((dime,3,nstates,nstates))
bigArrayNAC = np.empty((dime,nstates,nstates,natoms,3))
ind=0
for fileN in allH5:
[properties,NAC,ene] = retrieve_hdf5_data(fileN,
['DIPOLES','NAC','ROOT_ENERGIES'])
dmMat = properties  # dipole moment matrices, one (nstates x nstates) block per x, y, z
bigArray[ind] = dmMat
print(NAC[0,1,9,1], NAC[0,1,8,1], NAC[0,1,3,1])
bigArrayNAC[ind] = NAC
ind += 1
std = np.std(bigArray[:,0,0,0])
allstd = np.average(np.abs(bigArray), axis = 0)
fn = 'heatMap.png'
transp = False
my_dpi = 150
ratio = (9, 16)
fig, ax1 = plt.subplots(figsize=ratio)
xticks = np.arange(nstates)+1
yticks = np.arange(nstates)+1
plt.subplot(311)
plt.imshow(allstd[0], cmap='hot', interpolation='nearest')
plt.subplot(312)
plt.imshow(allstd[1], cmap='hot', interpolation='nearest')
plt.subplot(313)
plt.imshow(allstd[2], cmap='hot', interpolation='nearest')
#plt.savefig(fn, bbox_inches='tight', dpi=my_dpi, transparent=transp)
plt.savefig(fn, bbox_inches='tight', dpi=my_dpi)
plt.close('all')
# I first want to make a graph of EACH ELEMENT
elems = [[x,y,z] for x in range(3) for y in range(nstates) for z in range(nstates)]
pool = mp.Pool(processes = poolSize)
#pool.map(doThisToEachElement, zip(elems, repeat(dime), repeat(bigArray)))
rows = [[x,y] for x in range(3) for y in range(nstates)]
#for row in rows:
# doDipoToEachRow(row, dime, bigArray)
# For some reason the parallel version of this does not work properly.
pool.map(doDipoToEachRow, zip(rows, repeat(dime), repeat(bigArray)))
for i in np.arange(2)+1:
lab = 'NacElement' + str(i)
makeJustAnother2Dgraph(lab, lab, bigArrayNAC[:,0,i,9,1])
def doDipoToEachRow(tupleInput):
(row, dime, bigArray) = tupleInput
[a,b] = row
label = str(a+1) + '_' + str(b+1)
makeMultiLineDipoleGraph(np.arange(dime),bigArray[:,a,b], 'All_from_' +
label, b)
def doThisToEachElement(tupleInput):
''' It creates two kinds of graphs from the bigArray, elementwise '''
(elem, dime, bigArray) = tupleInput
[a,b,c] = elem
label = str(a+1) + '_' + str(b+1) + '_' + str(c+1)
makeJustAnother2Dgraph('Lin_' + label, label, bigArray[:,a,b,c])
#createHistogram(np.abs(bigArray[:,a,b,c]), 'His_' + label, binNum=20)
def kinAnalysis(globalExp, coorGraphs):
'''
Takes h5 files matching the glob expression and calculates first and second
derivatives along this coordinate, for an attempt at a kinetic energy analysis.
For now it only draws graphics...
'''
allH5 = sorted(glob.glob(globalExp))
dime = len(allH5)
if dime == 0:
err("no files in {}".format(globalExp))
allH5First = allH5[0]
nstates = len(retrieve_hdf5_data(allH5First,'SFS_ENERGIES'))
natoms = len(retrieve_hdf5_data(allH5First,'CENTER_COORDINATES'))
labels = retrieve_hdf5_data(allH5First,'CENTER_LABELS')
stringLabels = [ b[:1].decode("utf-8") for b in labels ]
print('\nnstates: {} \ndimension: {}'.format(nstates,dime))
bigArrayC = np.empty((dime,natoms,3))
bigArrayE = np.empty((dime,nstates))
bigArrayA1 = np.empty((dime))
# fill bigArrayC array
ind=0
for fileN in allH5:
singleCoord = retrieve_hdf5_data(fileN,'CENTER_COORDINATES')
energies = retrieve_hdf5_data(fileN,'SFS_ENERGIES')
coords = translateInCM(singleCoord, labels)
bigArrayC[ind] = coords
bigArrayE[ind] = energies
bigArrayA1[ind] = calcAngle(coords,2,3,4)
ind += 1
# True because we are in bohr and saveTraj expects that
saveTraj(bigArrayC, stringLabels, 'scanGeometriesCMfixed', True)
fileNameGraph = 'EnergiesAlongScan'
makeJustAnother2DgraphMULTI(bigArrayA1, bigArrayE,
fileNameGraph,'State', 1.0)
print('\nEnergy graph created:\n\neog ' + fileNameGraph + '.png\n')
# make graphs
if coorGraphs:
for alpha in range(3):
axes = ['X','Y','Z']
lab1 = axes[alpha]
for atomN in range(natoms):
lab2 = stringLabels[atomN] + str(atomN+1)
name = 'coord_' + lab1 + '_' + lab2
makeJustAnother2Dgraph(np.arange(dime), bigArrayC[:,atomN,alpha],name,name)
def translateInCM(geometry, labels):
'''
geometry :: (natoms,3) floats <- coordinates in angstrom
labels :: [Strings] <- atom types
this function calculates the center of mass and translates the molecule
so that the origin coincides with it.
'''
# Molcas h5 has strings as bytes, so I need to decode the atomLabels to utf-8
atomMasses = np.array([ massOf(b[:1].decode("utf-8")) for b in labels ])
rightDimension = np.stack((atomMasses,atomMasses,atomMasses),1)
geomMass = geometry * rightDimension
centerOfMass = np.apply_along_axis(np.sum, 0, geomMass)/np.sum(atomMasses)
translationVector = np.tile(centerOfMass, (geometry.shape[0], 1))  # one row per atom
newGeom = geometry - translationVector
return(newGeom)
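# Minimal sketch (illustrative only, not called by the script) of the same
# center-of-mass translation that translateInCM performs, on a fake two-atom
# system with made-up masses:
def _center_of_mass_sketch():
    geom = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0]])
    masses = np.array([1.0, 3.0])
    com = (geom * masses[:, None]).sum(axis=0) / masses.sum()  # -> [0.75, 0., 0.]
    return geom - com  # origin now sits on the center of mass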
def main():
''' Takes a list of rassi files and creates graphs of the dipole transition
elements '''
inputs = single_inputs("*.rassi.h5", 1, False, False)
new_inp = read_single_arguments(inputs)
if new_inp.kin:
kinAnalysis(new_inp.glob, new_inp.graphs)
else:
graphMultiRassi(new_inp.glob, new_inp.proc)
if __name__ == "__main__":
main()
| gpl-3.0 |
huanzhang12/lightgbm-gpu | tests/python_package_test/test_sklearn.py | 3 | 6123 | # coding: utf-8
# pylint: skip-file
import unittest
import lightgbm as lgb
import numpy as np
from sklearn.base import clone
from sklearn.datasets import (load_boston, load_breast_cancer, load_digits,
load_svmlight_file)
from sklearn.externals import joblib
from sklearn.metrics import log_loss, mean_squared_error
from sklearn.model_selection import GridSearchCV, train_test_split
class template(object):
@staticmethod
def test_template(X_y=load_boston(True), model=lgb.LGBMRegressor,
feval=mean_squared_error, num_round=100,
custom_obj=None, predict_proba=False,
return_data=False, return_model=False):
X_train, X_test, y_train, y_test = train_test_split(*X_y, test_size=0.1, random_state=42)
if return_data:
return X_train, X_test, y_train, y_test
arguments = {'n_estimators': num_round, 'silent': True}
if custom_obj:
arguments['objective'] = custom_obj
gbm = model(**arguments)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=10, verbose=False)
if return_model:
return gbm
elif predict_proba:
return feval(y_test, gbm.predict_proba(X_test))
else:
return feval(y_test, gbm.predict(X_test))
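# A few illustrative calls to the helper above, mirroring how the tests below
# use it (defaults are the boston data, LGBMRegressor and 100 boosting rounds):
#
#     mse = template.test_template()                       # returns the MSE
#     model = template.test_template(return_model=True)    # fitted regressor
#     X_tr, X_te, y_tr, y_te = template.test_template(return_data=True)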
class TestSklearn(unittest.TestCase):
def test_binary(self):
X_y = load_breast_cancer(True)
ret = template.test_template(X_y, lgb.LGBMClassifier, log_loss, predict_proba=True)
self.assertLess(ret, 0.15)
def test_regression(self):
self.assertLess(template.test_template() ** 0.5, 4)
def test_multiclass(self):
X_y = load_digits(10, True)
def multi_error(y_true, y_pred):
return np.mean(y_true != y_pred)
ret = template.test_template(X_y, lgb.LGBMClassifier, multi_error)
self.assertLess(ret, 0.2)
def test_lambdarank(self):
X_train, y_train = load_svmlight_file('../../examples/lambdarank/rank.train')
X_test, y_test = load_svmlight_file('../../examples/lambdarank/rank.test')
q_train = np.loadtxt('../../examples/lambdarank/rank.train.query')
q_test = np.loadtxt('../../examples/lambdarank/rank.test.query')
lgb_model = lgb.LGBMRanker().fit(X_train, y_train,
group=q_train,
eval_set=[(X_test, y_test)],
eval_group=[q_test],
eval_at=[1],
verbose=False,
callbacks=[lgb.reset_parameter(learning_rate=lambda x: 0.95 ** x * 0.1)])
def test_regression_with_custom_objective(self):
def objective_ls(y_true, y_pred):
grad = (y_pred - y_true)
hess = np.ones(len(y_true))
return grad, hess
ret = template.test_template(custom_obj=objective_ls)
self.assertLess(ret, 100)
def test_binary_classification_with_custom_objective(self):
def logregobj(y_true, y_pred):
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
grad = y_pred - y_true
hess = y_pred * (1.0 - y_pred)
return grad, hess
X_y = load_digits(2, True)
def binary_error(y_test, y_pred):
return np.mean([int(p > 0.5) != y for y, p in zip(y_test, y_pred)])
ret = template.test_template(X_y, lgb.LGBMClassifier, feval=binary_error, custom_obj=logregobj)
self.assertLess(ret, 0.1)
def test_dart(self):
X_train, X_test, y_train, y_test = template.test_template(return_data=True)
gbm = lgb.LGBMRegressor(boosting_type='dart')
gbm.fit(X_train, y_train)
self.assertLessEqual(gbm.score(X_train, y_train), 1.)
def test_grid_search(self):
X_train, X_test, y_train, y_test = template.test_template(return_data=True)
params = {'boosting_type': ['dart', 'gbdt'],
'n_estimators': [15, 20],
'drop_rate': [0.1, 0.2]}
gbm = GridSearchCV(lgb.LGBMRegressor(), params, cv=3)
gbm.fit(X_train, y_train)
self.assertIn(gbm.best_params_['n_estimators'], [15, 20])
def test_clone_and_property(self):
gbm = template.test_template(return_model=True)
gbm_clone = clone(gbm)
self.assertIsInstance(gbm.booster_, lgb.Booster)
self.assertIsInstance(gbm.feature_importances_, np.ndarray)
clf = template.test_template(load_digits(2, True), model=lgb.LGBMClassifier, return_model=True)
self.assertListEqual(sorted(clf.classes_), [0, 1])
self.assertEqual(clf.n_classes_, 2)
self.assertIsInstance(clf.booster_, lgb.Booster)
self.assertIsInstance(clf.feature_importances_, np.ndarray)
def test_joblib(self):
gbm = template.test_template(num_round=10, return_model=True)
joblib.dump(gbm, 'lgb.pkl')
gbm_pickle = joblib.load('lgb.pkl')
self.assertIsInstance(gbm_pickle.booster_, lgb.Booster)
self.assertDictEqual(gbm.get_params(), gbm_pickle.get_params())
self.assertListEqual(list(gbm.feature_importances_), list(gbm_pickle.feature_importances_))
X_train, X_test, y_train, y_test = template.test_template(return_data=True)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
gbm_pickle.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
for key in gbm.evals_result_:
for evals in zip(gbm.evals_result_[key], gbm_pickle.evals_result_[key]):
self.assertAlmostEqual(*evals, places=5)
pred_origin = gbm.predict(X_test)
pred_pickle = gbm_pickle.predict(X_test)
self.assertEqual(len(pred_origin), len(pred_pickle))
for preds in zip(pred_origin, pred_pickle):
self.assertAlmostEqual(*preds, places=5)
print("----------------------------------------------------------------------")
print("running test_sklearn.py")
unittest.main()
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/examples/animation/strip_chart_demo.py | 6 | 1514 | """
Emulate an oscilloscope. Requires the animation API introduced in
matplotlib 1.0 SVN.
"""
import matplotlib
import numpy as np
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import matplotlib.animation as animation
class Scope:
def __init__(self, ax, maxt=2, dt=0.02):
self.ax = ax
self.dt = dt
self.maxt = maxt
self.tdata = [0]
self.ydata = [0]
self.line = Line2D(self.tdata, self.ydata)
self.ax.add_line(self.line)
self.ax.set_ylim(-.1, 1.1)
self.ax.set_xlim(0, self.maxt)
def update(self, y):
lastt = self.tdata[-1]
if lastt > self.tdata[0] + self.maxt: # reset the arrays
self.tdata = [self.tdata[-1]]
self.ydata = [self.ydata[-1]]
self.ax.set_xlim(self.tdata[0], self.tdata[0] + self.maxt)
self.ax.figure.canvas.draw()
t = self.tdata[-1] + self.dt
self.tdata.append(t)
self.ydata.append(y)
self.line.set_data(self.tdata, self.ydata)
return self.line,
def emitter(p=0.03):
'return a random value with probability p, else 0'
while True:
v = np.random.rand(1)
if v > p:
yield 0.
else:
yield np.random.rand(1)
fig = plt.figure()
ax = fig.add_subplot(111)
scope = Scope(ax)
# pass a generator in "emitter" to produce data for the update func
ani = animation.FuncAnimation(fig, scope.update, emitter, interval=10,
blit=True)
plt.show()
| mit |
arnabgho/sklearn-theano | sklearn_theano/utils/ports.py | 9 | 5242 | import warnings
from sklearn.cross_validation import ShuffleSplit
from itertools import chain
from sklearn.utils import safe_indexing
import numpy as np
import scipy.sparse as sp
# A port of sklearn 0.16 utilities
# to avoid validation issues in older sklearn
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: %s"
% str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
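# Small usage sketch (illustrative arrays) tying the two helpers together:
def _indexable_usage_sketch():
    X = np.arange(10).reshape(5, 2)
    y = [0, 1, 0, 1, 0]
    X, y = indexable(X, y)          # the plain list y becomes an indexable array
    check_consistent_length(X, y)   # passes: both have 5 samples
    return X, y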
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %r" % x)
return x.shape[0] if hasattr(x, 'shape') else len(x)
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> a, b = np.arange(10).reshape((5, 2)), range(5)
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(b)
[0, 1, 2, 3, 4]
>>> a_train, a_test, b_train, b_test = train_test_split(
... a, b, test_size=0.33, random_state=42)
...
>>> a_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> b_train
[2, 0, 3]
>>> a_test
array([[2, 3],
[8, 9]])
>>> b_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.")
force_arrays = options.pop('force_arrays', False)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if force_arrays:
warnings.warn("The force_arrays option is deprecated and will be "
"removed in sklearn 0.18.", DeprecationWarning)
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
| bsd-3-clause |
ctralie/TUMTopoTimeSeries2016 | Synthetic1DPeriodTests.py | 1 | 3277 | from VideoTools import *
from TDA import *
import os
import numpy as np
import scipy.io as sio
import scipy.interpolate as interp
from sklearn.decomposition import PCA
def getSlidingWindow(x, dim, Tau, dT):
N = len(x)  # length of the input signal (do not rely on the module-level N)
NWindows = int(np.floor((N-dim*Tau)/dT))
X = np.zeros((NWindows, dim))
idx = np.arange(len(x))
for i in range(NWindows):
idxx = dT*i + Tau*np.arange(dim)
start = int(np.floor(idxx[0]))
end = int(np.ceil(idxx[-1]))
X[i, :] = interp.spline(idx[start:end+1], x[start:end+1], idxx)
return X
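# Quick sanity check (illustrative, not called anywhere): a pure cosine sampled
# 40 times per period and embedded with dim=10 traces out a closed loop in the
# embedding space, which is what the persistence computation below detects.
def _sliding_window_sketch():
    t = np.linspace(0, 2 * np.pi * 3, 121)[0:120]
    XS = getSlidingWindow(np.cos(t), dim=10, Tau=2.0, dT=1.0)
    return XS.shape  # roughly (100, 10)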
def getPulseTrain(NSamples, TMin, TMax, AmpMin, AmpMax):
x = np.zeros(NSamples)
x[0] = 1
i = 0
while i < NSamples:
i += TMin + int(np.round(np.random.randn()*(TMax-TMin)))
if i >= NSamples:
break
x[i] = AmpMin + (AmpMax-AmpMin)*np.random.randn()
return x
def convolveAndAddNoise(x, gaussSigma, noiseSigma):
gaussSigma = int(np.round(gaussSigma*3))
g = np.exp(-(np.arange(-gaussSigma, gaussSigma+1, dtype=np.float64))**2/(2*gaussSigma**2))
x = np.convolve(x, g, 'same')
x = x + noiseSigma*np.random.randn(len(x))
return x
def getSyntheticPulseTrain(NSamples, T, noiseSigma, gaussSigma):
x = np.zeros(NSamples)
x[0::T] = 1
x = convolveAndAddNoise(x, gaussSigma, noiseSigma)
return x
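# e.g. (illustrative parameters): a noisy pulse train with one pulse roughly
# every 40 samples, smoothed by a small Gaussian kernel:
#     x = getSyntheticPulseTrain(NSamples=400, T=40, noiseSigma=0.05, gaussSigma=2)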
if __name__ == '__main__':
T = 40 #The period in number of samples
NPeriods = 3 #How many periods to go through
N = T*NPeriods #The total number of samples
t = np.linspace(0, 2*np.pi*NPeriods, N+1)[0:N] #Sampling indices in time
x = np.cos(t) #The final signal
wins = np.linspace(2, 50, 100)
dim = 10
res = np.zeros(len(wins))
for i in range(len(wins)):
Tau = wins[i]/float(dim-1)
dT = (N-dim*Tau)/float(N)
XS = getSlidingWindow(x, dim, Tau, dT)
#Mean-center and normalize sliding window
#XS = XS - np.mean(XS, 1)[:, None]
#XS = XS/np.sqrt(np.sum(XS**2, 1))[:, None]
#XS = XS - np.mean(XS, 0)[None, :]
#XS = XS/np.sqrt(np.sum(XS**2, 1))[:, None]
#XS = XS/np.sqrt(XS.shape[1])
PDs = doRipsFiltration(XS, 1)
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(221)
pca = PCA(n_components = 2)
Y = pca.fit_transform(XS)
ax.set_title("PCA of Sliding Window Embedding")
ax.scatter(Y[:, 0], Y[:, 1])
ax.set_aspect('equal', 'datalim')
plt.subplot(223)
W = int(np.round(wins[i]))
plt.plot(np.arange(len(x)), x, 'b')
plt.hold(True)
plt.plot(np.arange(W), x[0:W], 'r')
plt.plot([wins[i], wins[i]], [np.min(x), np.max(x)])
plt.title("Signal Chunk")
plt.subplot(224)
plt.plot(wins, res)
plt.xlabel("Window Size")
plt.ylabel("Maximum Persistence")
if len(PDs) == 2:
if PDs[1].size > 0:
ax2 = fig.add_subplot(222)
plotDGM(PDs[1])
plt.title("Window = %g"%wins[i])
plt.savefig("PD%i.png"%i, bbox_inches='tight')
if len(PDs) < 2:
continue
if PDs[1].size > 0:
res[i] = np.max(PDs[1][:, 1] - PDs[1][:, 0])
sio.savemat("res.mat", {"res":res})
| apache-2.0 |
lemonade512/BluebonnetsPointsApp | docs/conf.py | 1 | 8764 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../bluebonnetspointsapp")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BluebonnetsPointsApp'
copyright = u'2016, Bryce Arden'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from bluebonnetspointsapp import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bluebonnetspointsapp-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'BluebonnetsPointsApp Documentation',
u'Bryce Arden', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| gpl-3.0 |
tbabej/astropy | astropy/visualization/units.py | 2 | 2941 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
__doctest_skip__ = ['quantity_support']
def quantity_support(format='latex_inline'):
"""
Enable support for plotting `astropy.units.Quantity` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.quantity_support():
... plt.figure()
... plt.plot([1, 2, 3] * u.m)
[...]
... plt.plot([101, 125, 150] * u.cm)
[...]
... plt.draw()
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to ``latex_inline``.
"""
from .. import units as u
from matplotlib import units
from matplotlib import ticker
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return 'π/2'
elif n == 2:
return 'π'
elif n % 2 == 0:
return '{0}π'.format(n / 2)
else:
return '{0}π/2'.format(n)
class MplQuantityConverter(units.ConversionInterface):
def __init__(self):
if u.Quantity not in units.registry:
units.registry[u.Quantity] = self
self._remove = True
else:
self._remove = False
@staticmethod
def axisinfo(unit, axis):
if unit == u.radian:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.to_string(),
)
elif unit == u.degree:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter('%i°'),
label=unit.to_string(),
)
elif unit is not None:
return units.AxisInfo(label=unit.to_string(format))
return None
@staticmethod
def convert(val, unit, axis):
if isinstance(val, u.Quantity):
return val.to(unit).value
else:
return val
@staticmethod
def default_units(x, axis):
if hasattr(x, 'unit'):
return x.unit
return None
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._remove:
del units.registry[u.Quantity]
return MplQuantityConverter()
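# Usage sketch (mirrors the docstring): calling quantity_support() outside a
# ``with`` block simply leaves the converter registered for the session, e.g.
#
#     import matplotlib.pyplot as plt
#     from astropy import units as u
#     from astropy.visualization import quantity_support
#     quantity_support()
#     plt.plot([1, 2, 3] * u.m)   # the axis picks up the metre unit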
| bsd-3-clause |
HolgerPeters/scikit-learn | sklearn/cluster/tests/test_k_means.py | 26 | 32656 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.cluster import homogeneity_score
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_elkan_results():
rnd = np.random.RandomState(0)
X_normal = rnd.normal(size=(50, 10))
X_blobs, _ = make_blobs(random_state=0)
km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)
km_elkan = KMeans(algorithm='elkan', n_clusters=5,
random_state=0, n_init=1)
for X in [X_normal, X_blobs]:
km_full.fit(X)
km_elkan.fit(X)
assert_array_almost_equal(km_elkan.cluster_centers_,
km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that an error is raised if the precompute_distances flag gets an
# unsupported value
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:, :2],
n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:2, :],
n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
# Check that KMeans will work well, even if X is a fortran-aligned array.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers which in turn makes the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not been collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1,
algorithm='elkan')
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1,
algorithm='elkan')
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_int_input():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
for dtype in [np.int32, np.int64]:
X_int = np.array(X_list, dtype=dtype)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
for km in fitted_models:
assert_equal(km.cluster_centers_.dtype, np.float64)
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='full')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='elkan')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_full_vs_elkan():
km1 = KMeans(algorithm='full', random_state=13)
km2 = KMeans(algorithm='elkan', random_state=13)
km1.fit(X)
km2.fit(X)
assert_almost_equal(homogeneity_score(km1.predict(X), km2.predict(X)), 1.0)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be',
km.fit, X)
def test_float_precision():
km = KMeans(n_init=1, random_state=30)
mb_km = MiniBatchKMeans(n_init=1, random_state=30)
inertia = {}
X_new = {}
centers = {}
for estimator in [km, mb_km]:
for is_sparse in [False, True]:
for dtype in [np.float64, np.float32]:
if is_sparse:
X_test = sp.csr_matrix(X_csr, dtype=dtype)
else:
X_test = X.astype(dtype)
estimator.fit(X_test)
# dtype of cluster centers has to be the dtype of the input
# data
assert_equal(estimator.cluster_centers_.dtype, dtype)
inertia[dtype] = estimator.inertia_
X_new[dtype] = estimator.transform(X_test)
centers[dtype] = estimator.cluster_centers_
# ensure the extracted row is a 2d array
assert_equal(estimator.predict(X_test[:1]),
estimator.labels_[0])
if hasattr(estimator, 'partial_fit'):
estimator.partial_fit(X_test[0:3])
# dtype of cluster centers has to stay the same after
# partial_fit
assert_equal(estimator.cluster_centers_.dtype, dtype)
# compare arrays with low precision since the difference between
# 32 and 64 bit sometimes makes a difference up to the 4th decimal
# place
assert_array_almost_equal(inertia[np.float32], inertia[np.float64],
decimal=4)
assert_array_almost_equal(X_new[np.float32], X_new[np.float64],
decimal=4)
assert_array_almost_equal(centers[np.float32], centers[np.float64],
decimal=4)
def test_k_means_init_centers():
# This test is used to check KMeans won't mutate the user provided input
# array silently even if input data and init centers have the same type
X_small = np.array([[1.1, 1.1], [-7.5, -7.5], [-1.1, -1.1], [7.5, 7.5]])
init_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, -5.0]])
for dtype in [np.int32, np.int64, np.float32, np.float64]:
X_test = dtype(X_small)
init_centers_test = dtype(init_centers)
assert_array_equal(init_centers, init_centers_test)
km = KMeans(init=init_centers_test, n_clusters=3, n_init=1)
km.fit(X_test)
assert_equal(False, np.may_share_memory(km.cluster_centers_, init_centers))
def test_sparse_k_means_init_centers():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
# Get a local optimum
centers = KMeans(n_clusters=3).fit(X).cluster_centers_
# Fit starting from a local optimum shouldn't change the solution
np.testing.assert_allclose(
centers,
KMeans(n_clusters=3,
init=centers,
n_init=1).fit(X).cluster_centers_
)
# The same should be true when X is sparse
X_sparse = sp.csr_matrix(X)
np.testing.assert_allclose(
centers,
KMeans(n_clusters=3,
init=centers,
n_init=1).fit(X_sparse).cluster_centers_
)
def test_sparse_validate_centers():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
# Get a local optimum
centers = KMeans(n_clusters=4).fit(X).cluster_centers_
# Test that a ValueError is raised for validate_center_shape
classifier = KMeans(n_clusters=3, init=centers, n_init=1)
msg = "The shape of the initial centers \(\(4L?, 4L?\)\) " \
"does not match the number of clusters 3"
assert_raises_regex(ValueError, msg, classifier.fit, X)
| bsd-3-clause |
ishanic/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 142 | 6276 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # this many iterations are needed for the fit to converge
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
    # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
    # the same input even when the input is sparse; also compare against the
    # non-sparse result.
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
    # Check that we don't get NaNs when sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
mjgrav2001/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; a Fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
lukeiwanski/tensorflow-opencl | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 3 | 53280 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import inspect
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
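# Illustrative sketch (not part of the original module): how the private
# _get_input_fn helper above turns in-memory numpy arrays into an
# (input_fn, feed_fn) pair. Wrapped in a function so importing this module
# stays side-effect free; the name and the values below are hypothetical.
def _example_get_input_fn_usage():
  x = np.random.rand(100, 4).astype(np.float32)
  y = np.random.randint(0, 2, size=100)
  input_fn, feed_fn = _get_input_fn(
      x, y, input_fn=None, feed_fn=None, batch_size=32, shuffle=True, epochs=1)
  # input_fn builds the (features, labels) tensors; feed_fn, when not None,
  # produces a feed dict for each training step.
  return input_fn, feed_fn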
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
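# Illustrative sketch (assumption, not part of the original module): inferring
# dense real-valued feature columns from an in-memory matrix; the returned
# columns can then be passed to a canned estimator.
def _example_infer_columns():
  x = np.random.rand(10, 3).astype(np.float32)
  return infer_real_valued_columns_from_input(x)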
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of
    calling the given metric function.
Raises:
    ValueError: If metrics specifications do not work with the type of
      `features`, `labels`, or `predictions` provided. Typically this happens
      when a dict of predictions is given but no pred_name is specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
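# Illustrative sketch (assumption, not part of the original module): the
# preferred way to specify metrics for _make_metrics_ops above is a dict of
# MetricSpec objects; the 'classes' prediction key is hypothetical.
def _example_metrics_dict():
  return {
      'accuracy': metric_spec.MetricSpec(
          metric_fn=metrics_lib.streaming_accuracy,
          prediction_key='classes'),
  }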
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
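# Illustrative note (not part of the original module): for example,
# _dict_to_str({'loss': 0.5, 'step': 10}) returns 'loss = 0.5, step = 10'.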
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: A RunConfig instance.
"""
# Model directory.
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info(
              'Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This can either
    implement iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at once, or when the model is taking a long time to
    converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
        the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
  @abc.abstractmethod
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
  @abc.abstractmethod
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
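# Illustrative sketch (assumption, not part of the original module): streaming
# metrics return a (value_op, update_op) pair, which the method above splits
# into a dict of value ops and a single grouped update op. The estimator,
# predictions and labels arguments are hypothetical placeholders.
def _example_extract_metric_update_ops(estimator, predictions, labels):
  eval_dict = {
      'mae': metrics_lib.streaming_mean_absolute_error(predictions, labels)}
  return estimator._extract_metric_update_ops(eval_dict)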
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
eval_dict = self._get_eval_ops(features, labels, metrics).eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=config_pb2.ConfigProto(allow_soft_placement=True))
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
config=config_pb2.ConfigProto(allow_soft_placement=True)))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=config_pb2.ConfigProto(allow_soft_placement=True)
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
            prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows
to configure Estimators from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
        Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _get_arguments(model_fn)
if params is not None and 'params' not in model_fn_args:
        raise ValueError('Estimator\'s model_fn (%s) does not accept a params '
                         'argument, but non-None params (%s) were passed.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _get_arguments(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_ops should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
with tf_session.Session('') as session:
variables.initialize_local_variables()
data_flow_ops.tables_initializer()
saver_for_restore = saver.Saver(
variables.global_variables(),
sharded=True)
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
data_flow_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
# During the deprecation period for x, y in Estimator, allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
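# Illustrative sketch (assumption, not part of the original module): a minimal
# model_fn wired into Estimator and the SKCompat wrapper above. Wrapped in a
# function so importing this module has no side effects; the shapes and the
# 'learning_rate' hyperparameter are hypothetical.
def _example_estimator_usage():
  import tensorflow as tf

  def _model_fn(features, labels, mode, params):
    # A single linear unit; squeeze so predictions match the 1-D labels.
    predictions = tf.squeeze(
        layers.fully_connected(features, 1, activation_fn=None), axis=1)
    loss = tf.reduce_mean(tf.square(predictions - labels))
    train_op = tf.train.GradientDescentOptimizer(
        params['learning_rate']).minimize(
            loss, global_step=contrib_framework.get_global_step())
    return model_fn_lib.ModelFnOps(
        mode=mode, predictions=predictions, loss=loss, train_op=train_op)

  est = SKCompat(Estimator(model_fn=_model_fn,
                           params={'learning_rate': 0.01}))
  x = np.random.rand(64, 3).astype(np.float32)
  y = np.random.rand(64).astype(np.float32)
  est.fit(x, y, batch_size=16, steps=10)
  return est.predict(x, batch_size=16)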
| apache-2.0 |
Ziqi-Li/bknqgis | pandas/pandas/tests/io/msgpack/test_case.py | 13 | 2740 | # coding: utf-8
from pandas.io.msgpack import packb, unpackb
def check(length, obj):
v = packb(obj)
assert len(v) == length, \
"%r length should be %r but get %r" % (obj, length, len(v))
assert unpackb(v, use_list=0) == obj
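# Illustrative addition (not part of the original test file): small
# non-negative integers pack into a single "positive fixint" byte, which is
# why test_1 below expects a packed length of 1 for values up to (1 << 7) - 1.
def test_fixint_example():
    assert packb(5) == b'\x05'
    assert unpackb(b'\x05', use_list=0) == 5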
def test_1():
for o in [None, True, False, 0, 1, (1 << 6), (1 << 7) - 1, -1,
-((1 << 5) - 1), -(1 << 5)]:
check(1, o)
def test_2():
for o in [1 << 7, (1 << 8) - 1, -((1 << 5) + 1), -(1 << 7)]:
check(2, o)
def test_3():
for o in [1 << 8, (1 << 16) - 1, -((1 << 7) + 1), -(1 << 15)]:
check(3, o)
def test_5():
for o in [1 << 16, (1 << 32) - 1, -((1 << 15) + 1), -(1 << 31)]:
check(5, o)
def test_9():
for o in [1 << 32, (1 << 64) - 1, -((1 << 31) + 1), -(1 << 63), 1.0, 0.1,
-0.1, -1.0]:
check(9, o)
def check_raw(overhead, num):
check(num + overhead, b" " * num)
def test_fixraw():
check_raw(1, 0)
check_raw(1, (1 << 5) - 1)
def test_raw16():
check_raw(3, 1 << 5)
check_raw(3, (1 << 16) - 1)
def test_raw32():
check_raw(5, 1 << 16)
def check_array(overhead, num):
check(num + overhead, (None, ) * num)
def test_fixarray():
check_array(1, 0)
check_array(1, (1 << 4) - 1)
def test_array16():
check_array(3, 1 << 4)
check_array(3, (1 << 16) - 1)
def test_array32():
check_array(5, (1 << 16))
def match(obj, buf):
assert packb(obj) == buf
assert unpackb(buf, use_list=0) == obj
def test_match():
cases = [
(None, b'\xc0'),
(False, b'\xc2'),
(True, b'\xc3'),
(0, b'\x00'),
(127, b'\x7f'),
(128, b'\xcc\x80'),
(256, b'\xcd\x01\x00'),
(-1, b'\xff'),
(-33, b'\xd0\xdf'),
(-129, b'\xd1\xff\x7f'),
({1: 1}, b'\x81\x01\x01'),
(1.0, b"\xcb\x3f\xf0\x00\x00\x00\x00\x00\x00"),
((), b'\x90'),
(tuple(range(15)), (b"\x9f\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09"
b"\x0a\x0b\x0c\x0d\x0e")),
(tuple(range(16)), (b"\xdc\x00\x10\x00\x01\x02\x03\x04\x05\x06\x07"
b"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f")),
({}, b'\x80'),
(dict([(x, x) for x in range(15)]),
(b'\x8f\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07'
b'\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e')),
(dict([(x, x) for x in range(16)]),
(b'\xde\x00\x10\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06'
b'\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e'
b'\x0f\x0f')),
]
for v, p in cases:
match(v, p)
def test_unicode():
assert unpackb(packb('foobar'), use_list=1) == b'foobar'
| gpl-2.0 |
themrmax/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 85 | 5728 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index size (full scan). An LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher-dimensional datasets tend to
benefit more from LSHForest indexing.
The break-even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
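# Indicative values: with n_samples_min=1e3, n_samples_max=1e5 and n_steps=6,
# the grid above is approximately [1000, 2511, 6309, 15848, 39810, 100000].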
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[[rng.randint(0, n_queries)]]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely, if
alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
Next, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
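# Interpretation note: this is the largest absolute row sum of the projection
# of the irrelevant columns onto the span of the relevant ones.  In the extreme
# case where X_irelevant is orthogonal to X_relevant the projector is zero and
# the incoherence is 0, the most favourable regime for L1 recovery described in
# the module docstring.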
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
nilbody/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_DEPRECATED_bernoulli_synthetic_data_mediumGBM.py | 1 | 2622 | from builtins import zip
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o import H2OFrame
import numpy as np
import scipy.stats
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
def bernoulli_synthetic_data_gbm_medium():
# Generate training dataset (adaptation of http://www.stat.missouri.edu/~speckman/stat461/boost.R)
train_rows = 10000
train_cols = 10
# Generate variables V1, ... V10
X_train = np.random.randn(train_rows, train_cols)
# y = +1 if sum_i x_{ij}^2 > chisq median on 10 df
y_train = np.asarray([1 if rs > scipy.stats.chi2.ppf(0.5, 10) else -1 for rs in [sum(r) for r in
np.multiply(X_train,X_train).tolist()]])
# Train scikit gbm
# TODO: grid-search
distribution = "bernoulli"
ntrees = 150
min_rows = 1
max_depth = 2
learn_rate = .01
nbins = 20
gbm_sci = ensemble.GradientBoostingClassifier(learning_rate=learn_rate, n_estimators=ntrees, max_depth=max_depth,
min_samples_leaf=min_rows, max_features=None)
gbm_sci.fit(X_train,y_train)
# Generate testing dataset
test_rows = 2000
test_cols = 10
# Generate variables V1, ... V10
X_test = np.random.randn(test_rows, test_cols)
# y = +1 if sum_i x_{ij}^2 > chisq median on 10 df
y_test = np.asarray([1 if rs > scipy.stats.chi2.ppf(0.5, 10) else -1 for rs in [sum(r) for r in
np.multiply(X_test,X_test).tolist()]])
# Score (AUC) the scikit gbm model on the test data
auc_sci = roc_auc_score(y_test, gbm_sci.predict_proba(X_test)[:,1])
# Compare this result to H2O
train_h2o = H2OFrame(np.column_stack((y_train, X_train)).tolist())
test_h2o = H2OFrame(np.column_stack((y_test, X_test)).tolist())
gbm_h2o = h2o.gbm(x=train_h2o[1:], y=train_h2o["C1"].asfactor(), distribution=distribution, ntrees=ntrees,
min_rows=min_rows, max_depth=max_depth, learn_rate=learn_rate, nbins=nbins)
gbm_perf = gbm_h2o.model_performance(test_h2o)
auc_h2o = gbm_perf.auc()
#Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
    assert abs(auc_h2o - auc_sci) < 1e-2, "h2o (auc) performance degradation, with respect to scikit. h2o auc: {0} " \
                                          "scikit auc: {1}".format(auc_h2o, auc_sci)
if __name__ == "__main__":
pyunit_utils.standalone_test(bernoulli_synthetic_data_gbm_medium)
else:
bernoulli_synthetic_data_gbm_medium()
| apache-2.0 |
jzt5132/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
harisbal/pandas | pandas/core/algorithms.py | 3 | 60557 | """
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import division
from warnings import warn, catch_warnings, simplefilter
from textwrap import dedent
import numpy as np
from pandas.core.dtypes.cast import (
maybe_promote, construct_1d_object_array_from_listlike)
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndex,
ABCIndexClass)
from pandas.core.dtypes.common import (
is_array_like,
is_unsigned_integer_dtype, is_signed_integer_dtype,
is_integer_dtype, is_complex_dtype,
is_object_dtype,
is_extension_array_dtype,
is_categorical_dtype, is_sparse,
is_period_dtype,
is_numeric_dtype, is_float_dtype,
is_bool_dtype, needs_i8_conversion,
is_datetimetz,
is_datetime64_any_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_datetimelike,
is_interval_dtype, is_scalar, is_list_like,
ensure_platform_int, ensure_object,
ensure_float64, ensure_uint64,
ensure_int64)
from pandas.core.dtypes.missing import isna, na_value_for_dtype
from pandas.core import common as com
from pandas._libs import algos, lib, hashtable as htable
from pandas._libs.tslib import iNaT
from pandas.util._decorators import (Appender, Substitution,
deprecate_kwarg)
_shared_docs = {}
# --------------- #
# dtype access #
# --------------- #
def _ensure_data(values, dtype=None):
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : array-like
dtype : pandas_dtype, optional
coerce to this dtype
Returns
-------
(ndarray, pandas_dtype, algo dtype as a string)
"""
# we check some simple dtypes first
try:
if is_object_dtype(dtype):
return ensure_object(np.asarray(values)), 'object', 'object'
if is_bool_dtype(values) or is_bool_dtype(dtype):
# we are actually coercing to uint64
# until our algos support uint8 directly (see TODO)
return np.asarray(values).astype('uint64'), 'bool', 'uint64'
elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
return ensure_int64(values), 'int64', 'int64'
elif (is_unsigned_integer_dtype(values) or
is_unsigned_integer_dtype(dtype)):
return ensure_uint64(values), 'uint64', 'uint64'
elif is_float_dtype(values) or is_float_dtype(dtype):
return ensure_float64(values), 'float64', 'float64'
elif is_object_dtype(values) and dtype is None:
return ensure_object(np.asarray(values)), 'object', 'object'
elif is_complex_dtype(values) or is_complex_dtype(dtype):
# ignore the fact that we are casting to float
# which discards complex parts
with catch_warnings():
simplefilter("ignore", np.ComplexWarning)
values = ensure_float64(values)
return values, 'float64', 'float64'
except (TypeError, ValueError, OverflowError):
# if we are trying to coerce to a dtype
# and it is incompat this will fall thru to here
return ensure_object(values), 'object', 'object'
# datetimelike
if (needs_i8_conversion(values) or
is_period_dtype(dtype) or
is_datetime64_any_dtype(dtype) or
is_timedelta64_dtype(dtype)):
if is_period_dtype(values) or is_period_dtype(dtype):
from pandas import PeriodIndex
values = PeriodIndex(values)
dtype = values.dtype
elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
from pandas import TimedeltaIndex
values = TimedeltaIndex(values)
dtype = values.dtype
else:
# Datetime
from pandas import DatetimeIndex
values = DatetimeIndex(values)
dtype = values.dtype
return values.asi8, dtype, 'int64'
elif (is_categorical_dtype(values) and
(is_categorical_dtype(dtype) or dtype is None)):
values = getattr(values, 'values', values)
values = values.codes
dtype = 'category'
# we are actually coercing to int64
# until our algos support int* directly (not all do)
values = ensure_int64(values)
return values, dtype, 'int64'
# we have failed, return object
values = np.asarray(values, dtype=np.object)
return ensure_object(values), 'object', 'object'
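# Illustrative sketch of the coercions above (indicative outputs):
#
#   _ensure_data(np.array([1, 2, 3]))
#   -> (array([1, 2, 3]), 'int64', 'int64')
#   _ensure_data(np.array([True, False]))
#   -> (array([1, 0], dtype=uint64), 'bool', 'uint64')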
def _reconstruct_data(values, dtype, original):
"""
reverse of _ensure_data
Parameters
----------
values : ndarray
dtype : pandas_dtype
original : ndarray-like
Returns
-------
Index for extension types, otherwise ndarray casted to dtype
"""
from pandas import Index
if is_extension_array_dtype(dtype):
values = dtype.construct_array_type()._from_sequence(values)
elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
values = Index(original)._shallow_copy(values, name=None)
elif is_bool_dtype(dtype):
values = values.astype(dtype)
# we only support object dtypes bool Index
if isinstance(original, Index):
values = values.astype(object)
elif dtype is not None:
values = values.astype(dtype)
return values
def _ensure_arraylike(values):
"""
ensure that we are arraylike if not already
"""
if not is_array_like(values):
inferred = lib.infer_dtype(values)
if inferred in ['mixed', 'string', 'unicode']:
if isinstance(values, tuple):
values = list(values)
values = construct_1d_object_array_from_listlike(values)
else:
values = np.asarray(values)
return values
_hashtables = {
'float64': (htable.Float64HashTable, htable.Float64Vector),
'uint64': (htable.UInt64HashTable, htable.UInt64Vector),
'int64': (htable.Int64HashTable, htable.Int64Vector),
'string': (htable.StringHashTable, htable.ObjectVector),
'object': (htable.PyObjectHashTable, htable.ObjectVector)
}
def _get_hashtable_algo(values):
"""
Parameters
----------
values : arraylike
Returns
-------
tuples(hashtable class,
vector class,
values,
dtype,
ndtype)
"""
values, dtype, ndtype = _ensure_data(values)
if ndtype == 'object':
# its cheaper to use a String Hash Table than Object
if lib.infer_dtype(values) in ['string']:
ndtype = 'string'
else:
ndtype = 'object'
htable, table = _hashtables[ndtype]
return (htable, table, values, dtype, ndtype)
def _get_data_algo(values, func_map):
if is_categorical_dtype(values):
values = values._values_for_rank()
values, dtype, ndtype = _ensure_data(values)
if ndtype == 'object':
# its cheaper to use a String Hash Table than Object
if lib.infer_dtype(values) in ['string']:
ndtype = 'string'
f = func_map.get(ndtype, func_map['object'])
return f, values
# --------------- #
# top-level algos #
# --------------- #
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
Parameters
----------
to_match : array-like
values to find positions of
values : array-like
Unique set of values
na_sentinel : int, default -1
Value to mark "not found"
Returns
-------
match : ndarray of integers
"""
values = com.asarray_tuplesafe(values)
htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
to_match, _, _ = _ensure_data(to_match, dtype)
table = htable(min(len(to_match), 1000000))
table.map_locations(values)
result = table.lookup(to_match)
if na_sentinel != -1:
# replace but return a numpy array
# use a Series because it handles dtype conversions properly
from pandas import Series
result = Series(result.ravel()).replace(-1, na_sentinel)
result = result.values.reshape(result.shape)
return result
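# Illustrative sketch (indicative output): positions of `to_match` within
# `values`, with the `na_sentinel` (-1 by default) marking values that are
# not found:
#
#   match([1, 2, 3], [3, 1])
#   -> array([ 1, -1,  0])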
def unique(values):
"""
Hash table-based unique. Uniques are returned in order
of appearance. This does NOT sort.
Significantly faster than numpy.unique. Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
unique values.
- If the input is an Index, the return is an Index
- If the input is a Categorical dtype, the return is a Categorical
- If the input is a Series/ndarray, the return will be an ndarray
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(pd.Series([pd.Timestamp('20160101'),
... pd.Timestamp('20160101')]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
dtype=object)
>>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
... dtype='datetime64[ns, US/Eastern]', freq=None)
>>> pd.unique(list('baabc'))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'))))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
... categories=list('abc'))))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.unique(pd.Series(pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)))
[b, a, c]
Categories (3, object): [a < b < c]
An array of tuples
>>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
See Also
--------
pandas.Index.unique
pandas.Series.unique
"""
values = _ensure_arraylike(values)
if is_extension_array_dtype(values):
# Dispatch to extension dtype's unique.
return values.unique()
original = values
htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
table = htable(len(values))
uniques = table.unique(values)
uniques = _reconstruct_data(uniques, dtype, original)
if isinstance(original, ABCSeries) and is_datetime64tz_dtype(dtype):
# we are special casing datetime64tz_dtype
# to return an object array of tz-aware Timestamps
# TODO: it must return DatetimeArray with tz in pandas 2.0
uniques = uniques.astype(object).values
return uniques
unique1d = unique
def isin(comps, values):
"""
Compute the isin boolean array
Parameters
----------
comps: array-like
values: array-like
Returns
-------
boolean array same length as comps
"""
if not is_list_like(comps):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{comps_type}]"
.format(comps_type=type(comps).__name__))
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = construct_1d_object_array_from_listlike(list(values))
if is_categorical_dtype(comps):
# TODO(extension)
# handle categoricals
return comps._values.isin(values)
comps = com.values_from_object(comps)
comps, dtype, _ = _ensure_data(comps)
values, _, _ = _ensure_data(values, dtype=dtype)
# faster for larger cases to use np.in1d
f = lambda x, y: htable.ismember_object(x, values)
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
if len(comps) > 1000000 and not is_object_dtype(comps):
f = lambda x, y: np.in1d(x, y)
elif is_integer_dtype(comps):
try:
values = values.astype('int64', copy=False)
comps = comps.astype('int64', copy=False)
f = lambda x, y: htable.ismember_int64(x, y)
except (TypeError, ValueError, OverflowError):
values = values.astype(object)
comps = comps.astype(object)
elif is_float_dtype(comps):
try:
values = values.astype('float64', copy=False)
comps = comps.astype('float64', copy=False)
f = lambda x, y: htable.ismember_float64(x, y)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
return f(comps, values)
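# Illustrative sketch (indicative output): element-wise membership test,
# returning a boolean array the same length as `comps`:
#
#   isin([2, 3, 5], [1, 2, 3])
#   -> array([ True,  True, False])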
def _factorize_array(values, na_sentinel=-1, size_hint=None,
na_value=None):
"""Factorize an array-like to labels and uniques.
This doesn't do any coercion of types or unboxing before factorization.
Parameters
----------
values : ndarray
na_sentinel : int, default -1
size_hint : int, optional
        Passed through to the hashtable's 'get_labels' method
na_value : object, optional
A value in `values` to consider missing. Note: only use this
parameter when you know that you don't have any values pandas would
consider missing in the array (NaN for float data, iNaT for
datetimes, etc.).
Returns
-------
labels, uniques : ndarray
"""
(hash_klass, _), values = _get_data_algo(values, _hashtables)
table = hash_klass(size_hint or len(values))
labels, uniques = table.factorize(values, na_sentinel=na_sentinel,
na_value=na_value)
labels = ensure_platform_int(labels)
return labels, uniques
_shared_docs['factorize'] = """
Encode the object as an enumerated type or categorical variable.
This method is useful for obtaining a numeric representation of an
array when all that matters is identifying distinct values. `factorize`
is available as both a top-level function :func:`pandas.factorize`,
and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.
Parameters
----------
%(values)s%(sort)s%(order)s
na_sentinel : int, default -1
Value to mark "not found".
%(size_hint)s\
Returns
-------
labels : ndarray
An integer ndarray that's an indexer into `uniques`.
``uniques.take(labels)`` will have the same values as `values`.
uniques : ndarray, Index, or Categorical
The unique valid values. When `values` is Categorical, `uniques`
is a Categorical. When `values` is some other pandas object, an
`Index` is returned. Otherwise, a 1-D ndarray is returned.
        .. note::
Even if there's a missing value in `values`, `uniques` will
*not* contain an entry for it.
See Also
--------
pandas.cut : Discretize continuous-valued array.
pandas.unique : Find the unique value in an array.
Examples
--------
These examples all show factorize as a top-level method like
``pd.factorize(values)``. The results are identical for methods like
:meth:`Series.factorize`.
>>> labels, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'])
>>> labels
array([0, 0, 1, 2, 0])
>>> uniques
array(['b', 'a', 'c'], dtype=object)
With ``sort=True``, the `uniques` will be sorted, and `labels` will be
    shuffled so that the relationship is maintained.
>>> labels, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'], sort=True)
>>> labels
array([1, 1, 0, 2, 1])
>>> uniques
array(['a', 'b', 'c'], dtype=object)
Missing values are indicated in `labels` with `na_sentinel`
(``-1`` by default). Note that missing values are never
included in `uniques`.
>>> labels, uniques = pd.factorize(['b', None, 'a', 'c', 'b'])
>>> labels
array([ 0, -1, 1, 2, 0])
>>> uniques
array(['b', 'a', 'c'], dtype=object)
Thus far, we've only factorized lists (which are internally coerced to
NumPy arrays). When factorizing pandas objects, the type of `uniques`
will differ. For Categoricals, a `Categorical` is returned.
>>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
>>> labels, uniques = pd.factorize(cat)
>>> labels
array([0, 0, 1])
>>> uniques
[a, c]
Categories (3, object): [a, b, c]
Notice that ``'b'`` is in ``uniques.categories``, despite not being
present in ``cat.values``.
For all other pandas objects, an Index of the appropriate type is
returned.
>>> cat = pd.Series(['a', 'a', 'c'])
>>> labels, uniques = pd.factorize(cat)
>>> labels
array([0, 0, 1])
>>> uniques
Index(['a', 'c'], dtype='object')
"""
@Substitution(
values=dedent("""\
values : sequence
A 1-D sequence. Sequences that aren't pandas objects are
coerced to ndarrays before factorization.
"""),
order=dedent("""\
order
.. deprecated:: 0.23.0
This parameter has no effect and is deprecated.
"""),
sort=dedent("""\
sort : bool, default False
Sort `uniques` and shuffle `labels` to maintain the
relationship.
"""),
size_hint=dedent("""\
size_hint : int, optional
Hint to the hashtable sizer.
"""),
)
@Appender(_shared_docs['factorize'])
@deprecate_kwarg(old_arg_name='order', new_arg_name=None)
def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
# Implementation notes: This method is responsible for 3 things
# 1.) coercing data to array-like (ndarray, Index, extension array)
# 2.) factorizing labels and uniques
# 3.) Maybe boxing the output in an Index
#
# Step 2 is dispatched to extension types (like Categorical). They are
# responsible only for factorization. All data coercion, sorting and boxing
# should happen here.
values = _ensure_arraylike(values)
original = values
if is_extension_array_dtype(values):
values = getattr(values, '_values', values)
labels, uniques = values.factorize(na_sentinel=na_sentinel)
dtype = original.dtype
else:
values, dtype, _ = _ensure_data(values)
if (is_datetime64_any_dtype(original) or
is_timedelta64_dtype(original) or
is_period_dtype(original)):
na_value = na_value_for_dtype(original.dtype)
else:
na_value = None
labels, uniques = _factorize_array(values,
na_sentinel=na_sentinel,
size_hint=size_hint,
na_value=na_value)
if sort and len(uniques) > 0:
from pandas.core.sorting import safe_sort
try:
order = uniques.argsort()
order2 = order.argsort()
labels = take_1d(order2, labels, fill_value=na_sentinel)
uniques = uniques.take(order)
except TypeError:
# Mixed types, where uniques.argsort fails.
uniques, labels = safe_sort(uniques, labels,
na_sentinel=na_sentinel,
assume_unique=True)
uniques = _reconstruct_data(uniques, dtype, original)
    # box `uniques` to match the type of the original input
if isinstance(original, ABCIndexClass):
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
uniques = Index(uniques)
return labels, uniques
def value_counts(values, sort=True, ascending=False, normalize=False,
bins=None, dropna=True):
"""
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
    normalize : boolean, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Don't include counts of NaN
Returns
-------
value_counts : Series
"""
from pandas.core.series import Series, Index
name = getattr(values, 'name', None)
if bins is not None:
try:
from pandas.core.reshape.tile import cut
values = Series(values)
ii = cut(values, bins, include_lowest=True)
except TypeError:
raise TypeError("bins argument only works with numeric data.")
        # count, remove nulls (from the index), and use the bins
result = ii.value_counts(dropna=dropna)
result = result[result.index.notna()]
result.index = result.index.astype('interval')
result = result.sort_index()
# if we are dropna and we have NO values
if dropna and (result.values == 0).all():
result = result.iloc[0:0]
# normalizing is by len of all (regardless of dropna)
counts = np.array([len(ii)])
else:
if is_extension_array_dtype(values) or is_sparse(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
result.name = name
counts = result.values
else:
keys, counts = _value_counts_arraylike(values, dropna)
if not isinstance(keys, Index):
keys = Index(keys)
result = Series(counts, index=keys, name=name)
if sort:
result = result.sort_values(ascending=ascending)
if normalize:
result = result / float(counts.sum())
return result
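# Illustrative sketch (indicative output): counts of unique values, sorted by
# frequency by default:
#
#   value_counts(pd.Series(['a', 'b', 'a']))
#   -> a    2
#      b    1
#      dtype: int64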
def _value_counts_arraylike(values, dropna):
"""
Parameters
----------
values : arraylike
dropna : boolean
Returns
-------
(uniques, counts)
"""
values = _ensure_arraylike(values)
original = values
values, dtype, ndtype = _ensure_data(values)
if needs_i8_conversion(dtype):
# i8
keys, counts = htable.value_count_int64(values, dropna)
if dropna:
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
else:
# ndarray like
# TODO: handle uint8
f = getattr(htable, "value_count_{dtype}".format(dtype=ndtype))
keys, counts = f(values, dropna)
mask = isna(values)
if not dropna and mask.any():
if not isna(keys).any():
keys = np.insert(keys, 0, np.NaN)
counts = np.insert(counts, 0, mask.sum())
keys = _reconstruct_data(keys, original.dtype, original)
return keys, counts
def duplicated(values, keep='first'):
"""
Return boolean ndarray denoting duplicate values.
.. versionadded:: 0.19.0
Parameters
----------
values : ndarray-like
Array over which to check for duplicate values.
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : ndarray
"""
values, dtype, ndtype = _ensure_data(values)
f = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype))
return f(values, keep=keep)
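# Illustrative sketch (indicative output): with the default keep='first',
# only repeats after the first occurrence are flagged:
#
#   duplicated(np.array([1, 1, 2, 1]))
#   -> array([False,  True, False,  True])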
def mode(values, dropna=True):
"""
Returns the mode(s) of an array.
Parameters
----------
values : array-like
Array over which to check for duplicate values.
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
mode : Series
"""
from pandas import Series
values = _ensure_arraylike(values)
original = values
# categorical is a fast-path
if is_categorical_dtype(values):
if isinstance(values, Series):
return Series(values.values.mode(dropna=dropna), name=values.name)
return values.mode(dropna=dropna)
if dropna and is_datetimelike(values):
mask = values.isnull()
values = values[~mask]
values, dtype, ndtype = _ensure_data(values)
f = getattr(htable, "mode_{dtype}".format(dtype=ndtype))
result = f(values, dropna=dropna)
try:
result = np.sort(result)
except TypeError as e:
warn("Unable to sort modes: {error}".format(error=e))
result = _reconstruct_data(result, original.dtype, original)
return Series(result)
def rank(values, axis=0, method='average', na_option='keep',
ascending=True, pct=False):
"""
Rank the values along a given axis.
Parameters
----------
values : array-like
Array whose values will be ranked. The number of dimensions in this
array must not exceed 2.
axis : int, default 0
Axis over which to perform rankings.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
The method by which tiebreaks are broken during the ranking.
na_option : {'keep', 'top'}, default 'keep'
The method by which NaNs are placed in the ranking.
- ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they
          are ranked at the top
ascending : boolean, default True
Whether or not the elements should be ranked in ascending order.
pct : boolean, default False
        Whether or not to display the returned rankings in integer form
(e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
"""
if values.ndim == 1:
f, values = _get_data_algo(values, _rank1d_functions)
ranks = f(values, ties_method=method, ascending=ascending,
na_option=na_option, pct=pct)
elif values.ndim == 2:
f, values = _get_data_algo(values, _rank2d_functions)
ranks = f(values, axis=axis, ties_method=method,
ascending=ascending, na_option=na_option, pct=pct)
else:
raise TypeError("Array with ndim > 2 are not supported.")
return ranks
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
"""
Perform array addition that checks for underflow and overflow.
Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that element
is automatically ignored.
Parameters
----------
arr : array addend.
b : array or scalar addend.
arr_mask : boolean array or None
array indicating which elements to exclude from checking
b_mask : boolean array or boolean or None
array or scalar indicating which element(s) to exclude from checking
Returns
-------
sum : An array for elements x + b for each element x in arr if b is
a scalar or an array for elements x + y for each element pair
(x, y) in (arr, b).
Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
# For performance reasons, we broadcast 'b' to the new array 'b2'
# so that it has the same size as 'arr'.
b2 = np.broadcast_to(b, arr.shape)
if b_mask is not None:
# We do the same broadcasting for b_mask as well.
b2_mask = np.broadcast_to(b_mask, arr.shape)
else:
b2_mask = None
# For elements that are NaN, regardless of their value, we should
# ignore whether they overflow or not when doing the checked add.
if arr_mask is not None and b2_mask is not None:
not_nan = np.logical_not(arr_mask | b2_mask)
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask)
elif b_mask is not None:
not_nan = np.logical_not(b2_mask)
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
# gh-14324: For each element in 'arr' and its corresponding element
# in 'b2', we check the sign of the element in 'b2'. If it is positive,
# we then check whether its sum with the element in 'arr' exceeds
# np.iinfo(np.int64).max. If so, we have an overflow error. If it
    # is negative, we then check whether its sum with the element in
# 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
# error as well.
mask1 = b2 > 0
mask2 = b2 < 0
if not mask1.any():
to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any()
elif not mask2.any():
to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any()
else:
to_raise = (((np.iinfo(np.int64).max -
b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or
((np.iinfo(np.int64).min -
b2[mask2] > arr[mask2]) & not_nan[mask2]).any())
if to_raise:
raise OverflowError("Overflow in int64 addition")
return arr + b
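# Illustrative note: checked_add_with_arr guards against silent int64
# wrap-around; e.g. adding 1 to np.iinfo(np.int64).max is expected to raise
# OverflowError("Overflow in int64 addition") rather than overflow silently.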
_rank1d_functions = {
'float64': algos.rank_1d_float64,
'int64': algos.rank_1d_int64,
'uint64': algos.rank_1d_uint64,
'object': algos.rank_1d_object
}
_rank2d_functions = {
'float64': algos.rank_2d_float64,
'int64': algos.rank_2d_int64,
'uint64': algos.rank_2d_uint64,
'object': algos.rank_2d_object
}
def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only,
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isna(x)
x = x[~mask]
values = np.sort(x)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
idx % 1)
elif interpolation_method == 'lower':
score = values[np.floor(idx)]
elif interpolation_method == 'higher':
score = values[np.ceil(idx)]
else:
raise ValueError("interpolation_method can only be 'fraction' "
", 'lower' or 'higher'")
return score
if is_scalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return algos.arrmap_float64(q, _get_score)
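# Illustrative note: with the default 'fraction' interpolation the median of
# np.arange(100) falls halfway between 49 and 50, so
# quantile(np.arange(100), 0.5) is expected to return 49.5, matching the
# scipy example in the docstring.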
# --------------- #
# select n #
# --------------- #
class SelectN(object):
def __init__(self, obj, n, keep):
self.obj = obj
self.n = n
self.keep = keep
if self.keep not in ('first', 'last', 'all'):
raise ValueError('keep must be either "first", "last" or "all"')
def nlargest(self):
return self.compute('nlargest')
def nsmallest(self):
return self.compute('nsmallest')
@staticmethod
def is_valid_dtype_n_method(dtype):
"""
Helper function to determine if dtype is valid for
nsmallest/nlargest methods
"""
return ((is_numeric_dtype(dtype) and not is_complex_dtype(dtype)) or
needs_i8_conversion(dtype))
class SelectNSeries(SelectN):
"""
Implement n largest/smallest for Series
Parameters
----------
obj : Series
n : int
keep : {'first', 'last'}, default 'first'
Returns
-------
nordered : Series
"""
def compute(self, method):
n = self.n
dtype = self.obj.dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError("Cannot use method '{method}' with "
"dtype {dtype}".format(method=method,
dtype=dtype))
if n <= 0:
return self.obj[[]]
dropped = self.obj.dropna()
# slow method
if n >= len(self.obj):
reverse_it = (self.keep == 'last' or method == 'nlargest')
ascending = method == 'nsmallest'
slc = np.s_[::-1] if reverse_it else np.s_[:]
return dropped[slc].sort_values(ascending=ascending).head(n)
# fast method
arr, pandas_dtype, _ = _ensure_data(dropped.values)
if method == 'nlargest':
arr = -arr
if is_integer_dtype(pandas_dtype):
# GH 21426: ensure reverse ordering at boundaries
arr -= 1
if self.keep == 'last':
arr = arr[::-1]
narr = len(arr)
n = min(n, narr)
kth_val = algos.kth_smallest(arr.copy(), n - 1)
ns, = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind='mergesort')]
if self.keep != 'all':
inds = inds[:n]
if self.keep == 'last':
# reverse indices
inds = narr - 1 - inds
return dropped.iloc[inds]
class SelectNFrame(SelectN):
"""
Implement n largest/smallest for DataFrame
Parameters
----------
obj : DataFrame
n : int
keep : {'first', 'last'}, default 'first'
columns : list or str
Returns
-------
nordered : DataFrame
"""
def __init__(self, obj, n, keep, columns):
super(SelectNFrame, self).__init__(obj, n, keep)
if not is_list_like(columns) or isinstance(columns, tuple):
columns = [columns]
columns = list(columns)
self.columns = columns
def compute(self, method):
from pandas import Int64Index
n = self.n
frame = self.obj
columns = self.columns
for column in columns:
dtype = frame[column].dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError((
"Column {column!r} has dtype {dtype}, cannot use method "
"{method!r} with this dtype"
).format(column=column, dtype=dtype, method=method))
def get_indexer(current_indexer, other_indexer):
"""Helper function to concat `current_indexer` and `other_indexer`
depending on `method`
"""
if method == 'nsmallest':
return current_indexer.append(other_indexer)
else:
return other_indexer.append(current_indexer)
# Below we save and reset the index in case index contains duplicates
original_index = frame.index
cur_frame = frame = frame.reset_index(drop=True)
cur_n = n
indexer = Int64Index([])
for i, column in enumerate(columns):
# For each column we apply method to cur_frame[column].
# If it's the last column or if we have the number of
# results desired we are done.
# Otherwise there are duplicates of the largest/smallest
# value and we need to look at the rest of the columns
# to determine which of the rows with the largest/smallest
# value in the column to keep.
series = cur_frame[column]
is_last_column = len(columns) - 1 == i
values = getattr(series, method)(
cur_n,
keep=self.keep if is_last_column else 'all')
if is_last_column or len(values) <= cur_n:
indexer = get_indexer(indexer, values.index)
break
# Now find all values which are equal to
            # the (nsmallest: largest)/(nlargest: smallest)
# from our series.
border_value = values == values[values.index[-1]]
# Some of these values are among the top-n
# some aren't.
unsafe_values = values[border_value]
# These values are definitely among the top-n
safe_values = values[~border_value]
indexer = get_indexer(indexer, safe_values.index)
# Go on and separate the unsafe_values on the remaining
# columns.
cur_frame = cur_frame.loc[unsafe_values.index]
cur_n = n - len(indexer)
frame = frame.take(indexer)
# Restore the index on frame
frame.index = original_index.take(indexer)
# If there is only one column, the frame is already sorted.
if len(columns) == 1:
return frame
ascending = method == 'nsmallest'
return frame.sort_values(
columns,
ascending=ascending,
kind='mergesort')
# ---- #
# take #
# ---- #
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _take_2d_multi_object(arr, indexer, out, fill_value, mask_info):
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_nd_object(arr, indexer, out, axis, fill_value, mask_info):
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(ensure_platform_int(indexer), axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
_take_1d_dict = {
('int8', 'int8'): algos.take_1d_int8_int8,
('int8', 'int32'): algos.take_1d_int8_int32,
('int8', 'int64'): algos.take_1d_int8_int64,
('int8', 'float64'): algos.take_1d_int8_float64,
('int16', 'int16'): algos.take_1d_int16_int16,
('int16', 'int32'): algos.take_1d_int16_int32,
('int16', 'int64'): algos.take_1d_int16_int64,
('int16', 'float64'): algos.take_1d_int16_float64,
('int32', 'int32'): algos.take_1d_int32_int32,
('int32', 'int64'): algos.take_1d_int32_int64,
('int32', 'float64'): algos.take_1d_int32_float64,
('int64', 'int64'): algos.take_1d_int64_int64,
('int64', 'float64'): algos.take_1d_int64_float64,
('float32', 'float32'): algos.take_1d_float32_float32,
('float32', 'float64'): algos.take_1d_float32_float64,
('float64', 'float64'): algos.take_1d_float64_float64,
('object', 'object'): algos.take_1d_object_object,
('bool', 'bool'): _view_wrapper(algos.take_1d_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_1d_bool_object, np.uint8,
None),
('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(
algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
}
_take_2d_axis0_dict = {
('int8', 'int8'): algos.take_2d_axis0_int8_int8,
('int8', 'int32'): algos.take_2d_axis0_int8_int32,
('int8', 'int64'): algos.take_2d_axis0_int8_int64,
('int8', 'float64'): algos.take_2d_axis0_int8_float64,
('int16', 'int16'): algos.take_2d_axis0_int16_int16,
('int16', 'int32'): algos.take_2d_axis0_int16_int32,
('int16', 'int64'): algos.take_2d_axis0_int16_int64,
('int16', 'float64'): algos.take_2d_axis0_int16_float64,
('int32', 'int32'): algos.take_2d_axis0_int32_int32,
('int32', 'int64'): algos.take_2d_axis0_int32_int64,
('int32', 'float64'): algos.take_2d_axis0_int32_float64,
('int64', 'int64'): algos.take_2d_axis0_int64_int64,
('int64', 'float64'): algos.take_2d_axis0_int64_float64,
('float32', 'float32'): algos.take_2d_axis0_float32_float32,
('float32', 'float64'): algos.take_2d_axis0_float32_float64,
('float64', 'float64'): algos.take_2d_axis0_float64_float64,
('object', 'object'): algos.take_2d_axis0_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_axis0_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_axis1_dict = {
('int8', 'int8'): algos.take_2d_axis1_int8_int8,
('int8', 'int32'): algos.take_2d_axis1_int8_int32,
('int8', 'int64'): algos.take_2d_axis1_int8_int64,
('int8', 'float64'): algos.take_2d_axis1_int8_float64,
('int16', 'int16'): algos.take_2d_axis1_int16_int16,
('int16', 'int32'): algos.take_2d_axis1_int16_int32,
('int16', 'int64'): algos.take_2d_axis1_int16_int64,
('int16', 'float64'): algos.take_2d_axis1_int16_float64,
('int32', 'int32'): algos.take_2d_axis1_int32_int32,
('int32', 'int64'): algos.take_2d_axis1_int32_int64,
('int32', 'float64'): algos.take_2d_axis1_int32_float64,
('int64', 'int64'): algos.take_2d_axis1_int64_int64,
('int64', 'float64'): algos.take_2d_axis1_int64_float64,
('float32', 'float32'): algos.take_2d_axis1_float32_float32,
('float32', 'float64'): algos.take_2d_axis1_float32_float64,
('float64', 'float64'): algos.take_2d_axis1_float64_float64,
('object', 'object'): algos.take_2d_axis1_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_axis1_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_multi_dict = {
('int8', 'int8'): algos.take_2d_multi_int8_int8,
('int8', 'int32'): algos.take_2d_multi_int8_int32,
('int8', 'int64'): algos.take_2d_multi_int8_int64,
('int8', 'float64'): algos.take_2d_multi_int8_float64,
('int16', 'int16'): algos.take_2d_multi_int16_int16,
('int16', 'int32'): algos.take_2d_multi_int16_int32,
('int16', 'int64'): algos.take_2d_multi_int16_int64,
('int16', 'float64'): algos.take_2d_multi_int16_float64,
('int32', 'int32'): algos.take_2d_multi_int32_int32,
('int32', 'int64'): algos.take_2d_multi_int32_int64,
('int32', 'float64'): algos.take_2d_multi_int32_float64,
('int64', 'int64'): algos.take_2d_multi_int64_int64,
('int64', 'float64'): algos.take_2d_multi_int64_float64,
('float32', 'float32'): algos.take_2d_multi_float32_float32,
('float32', 'float64'): algos.take_2d_multi_float32_float64,
('float64', 'float64'): algos.take_2d_multi_float64_float64,
('object', 'object'): algos.take_2d_multi_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_multi_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
if ndim <= 2:
tup = (arr_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
return func
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
func = _convert_wrapper(func, out_dtype)
return func
def func(arr, indexer, out, fill_value=np.nan):
indexer = ensure_int64(indexer)
_take_nd_object(arr, indexer, out, axis=axis, fill_value=fill_value,
mask_info=mask_info)
return func
def take(arr, indices, axis=0, allow_fill=False, fill_value=None):
"""
Take elements from an array.
.. versionadded:: 0.23.0
Parameters
----------
arr : sequence
Non array-likes (sequences without a dtype) are coerced
to an ndarray.
indices : sequence of integers
Indices to be taken.
axis : int, default 0
The axis over which to select values.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to :func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
          negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type (``self.dtype.na_value``) is used.
For multi-dimensional `arr`, each *element* is filled with
`fill_value`.
Returns
-------
ndarray or ExtensionArray
Same type as the input.
Raises
------
IndexError
When `indices` is out of bounds for the array.
ValueError
When the indexer contains negative values other than ``-1``
and `allow_fill` is True.
Notes
-----
When `allow_fill` is False, `indices` may be whatever dimensionality
is accepted by NumPy for `arr`.
When `allow_fill` is True, `indices` should be 1-D.
See Also
--------
numpy.take
Examples
--------
>>> from pandas.api.extensions import take
With the default ``allow_fill=False``, negative numbers indicate
positional indices from the right.
>>> take(np.array([10, 20, 30]), [0, 0, -1])
array([10, 10, 30])
Setting ``allow_fill=True`` will place `fill_value` in those positions.
>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
array([10., 10., nan])
>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
... fill_value=-10)
array([ 10, 10, -10])
"""
from pandas.core.indexing import validate_indices
if not is_array_like(arr):
arr = np.asarray(arr)
indices = np.asarray(indices, dtype=np.intp)
if allow_fill:
# Pandas style, -1 means NA
validate_indices(indices, len(arr))
result = take_1d(arr, indices, axis=axis, allow_fill=True,
fill_value=fill_value)
else:
# NumPy style
result = arr.take(indices, axis=axis)
return result
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
This dispatches to ``take`` defined on ExtensionArrays. It does not
currently dispatch to ``SparseArray.take`` for sparse ``arr``.
Parameters
----------
arr : array-like
Input array.
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
        indices are filled with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
_maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
If provided, value should correspond to:
            (indexer == -1, (indexer == -1).any())
If not provided, it will be computed internally if necessary
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
Returns
-------
subarray : array-like
May be the same type as the input, or cast to an ndarray.
"""
# TODO(EA): Remove these if / elifs as datetimeTZ, interval, become EAs
# dispatch to internal type takes
if is_extension_array_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
elif is_datetimetz(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
elif is_interval_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
if is_sparse(arr):
arr = arr.get_values()
elif isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr.values
arr = np.asarray(arr)
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = ensure_int64(indexer, copy=False)
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
if needs_masking:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
flip_order = False
if arr.ndim == 2:
if arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
if out is not None:
out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = list(arr.shape)
out_shape[axis] = len(indexer)
out_shape = tuple(out_shape)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._data.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order='F')
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis,
mask_info=mask_info)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
take_1d = take_nd
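# Illustrative sketch (not part of the original module): with the default
# allow_fill=True, a -1 entry in the indexer marks a missing position and the
# dtype is promoted so the fill value fits, e.g. int64 -> float64:
#
#   values = np.array([1, 2, 3], dtype=np.int64)
#   take_nd(values, np.array([0, -1, 2]))
#   # -> array([ 1., nan,  3.])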
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None,
allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
"""
if indexer is None or (indexer[0] is None and indexer[1] is None):
row_idx = np.arange(arr.shape[0], dtype=np.int64)
col_idx = np.arange(arr.shape[1], dtype=np.int64)
indexer = row_idx, col_idx
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
row_idx, col_idx = indexer
if row_idx is None:
row_idx = np.arange(arr.shape[0], dtype=np.int64)
else:
row_idx = ensure_int64(row_idx)
if col_idx is None:
col_idx = np.arange(arr.shape[1], dtype=np.int64)
else:
col_idx = ensure_int64(col_idx)
indexer = row_idx, col_idx
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if row_needs or col_needs:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is None:
def func(arr, indexer, out, fill_value=np.nan):
_take_2d_multi_object(arr, indexer, out, fill_value=fill_value,
mask_info=mask_info)
func(arr, indexer, out=out, fill_value=fill_value)
return out
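# Illustrative sketch (not part of the original module): take_2d_multi applies
# a row indexer and a column indexer in a single pass, filling rows or columns
# marked with -1:
#
#   arr = np.arange(9, dtype=np.float64).reshape(3, 3)
#   take_2d_multi(arr, (np.array([0, -1]), np.array([2, 0])))
#   # -> [[ 2.,  0.],
#   #     [nan, nan]]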
# ---- #
# diff #
# ---- #
_diff_special = {
'float64': algos.diff_2d_float64,
'float32': algos.diff_2d_float32,
'int64': algos.diff_2d_int64,
'int32': algos.diff_2d_int32,
'int16': algos.diff_2d_int16,
'int8': algos.diff_2d_int8,
}
def diff(arr, n, axis=0):
"""
    Compute the difference of n periods along the given axis,
    analogous to s - s.shift(n).
Parameters
----------
arr : ndarray
n : int
number of periods
axis : int
axis to shift on
Returns
-------
shifted
"""
n = int(n)
na = np.nan
dtype = arr.dtype
is_timedelta = False
if needs_i8_conversion(arr):
dtype = np.float64
arr = arr.view('i8')
na = iNaT
is_timedelta = True
elif is_bool_dtype(dtype):
dtype = np.object_
elif is_integer_dtype(dtype):
dtype = np.float64
dtype = np.dtype(dtype)
out_arr = np.empty(arr.shape, dtype=dtype)
na_indexer = [slice(None)] * arr.ndim
na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
out_arr[tuple(na_indexer)] = na
if arr.ndim == 2 and arr.dtype.name in _diff_special:
f = _diff_special[arr.dtype.name]
f(arr, out_arr, n, axis)
else:
res_indexer = [slice(None)] * arr.ndim
res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
res_indexer = tuple(res_indexer)
lag_indexer = [slice(None)] * arr.ndim
lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
lag_indexer = tuple(lag_indexer)
# need to make sure that we account for na for datelike/timedelta
# we don't actually want to subtract these i8 numbers
if is_timedelta:
res = arr[res_indexer]
lag = arr[lag_indexer]
mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
if mask.any():
res = res.copy()
res[mask] = 0
lag = lag.copy()
lag[mask] = 0
result = res - lag
result[mask] = na
out_arr[res_indexer] = result
else:
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
if is_timedelta:
from pandas import TimedeltaIndex
out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape(
out_arr.shape).astype('timedelta64[ns]')
return out_arr
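# Illustrative sketch (not part of the original module): diff matches the
# semantics of s - s.shift(n), with integer input upcast to float64 so the
# leading positions can hold NaN:
#
#   x = np.array([1, 4, 9, 16], dtype=np.int64)
#   diff(x, 1)
#   # -> array([nan,  3.,  5.,  7.])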
| bsd-3-clause |
emmaggie/hmmlearn | hmmlearn/tests/test_hmm.py | 2 | 21345 | from __future__ import print_function
from unittest import TestCase
import numpy as np
from nose import SkipTest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import mixture
from sklearn.utils import check_random_state
from hmmlearn import hmm
from hmmlearn.utils import normalize
rng = np.random.RandomState(0)
np.seterr(all='warn')
def train_hmm_and_keep_track_of_log_likelihood(hmm, obs, n_iter=1, **kwargs):
hmm.n_iter = 1
hmm.fit(obs)
loglikelihoods = []
for n in range(n_iter):
hmm.n_iter = 1
hmm.init_params = ''
hmm.fit(obs)
loglikelihoods.append(sum(hmm.score(x) for x in obs))
return loglikelihoods
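# Illustrative sketch (not part of the original tests): the helper above is
# typically used to check that EM training does not decrease the
# log-likelihood. The model and data below are assumptions:
#
#   model = hmm.GaussianHMM(n_components=2, covariance_type='diag')
#   obs = [rng.randn(20, 3) for _ in range(5)]
#   trainll = train_hmm_and_keep_track_of_log_likelihood(model, obs, n_iter=3)
#   # np.diff(trainll) should stay (almost) non-negative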
class GaussianHMMBaseTester(object):
def setUp(self):
self.prng = prng = np.random.RandomState(10)
self.n_components = n_components = 3
self.n_features = n_features = 3
self.startprob = prng.rand(n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = prng.rand(n_components, n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, n_components))
self.means = prng.randint(-20, 20, (n_components, n_features))
self.covars = {
'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)),
'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
'full': np.array([make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)
for x in range(n_components)]),
}
self.expanded_covars = {
'spherical': [np.eye(n_features) * cov
for cov in self.covars['spherical']],
'diag': [np.diag(cov) for cov in self.covars['diag']],
'tied': [self.covars['tied']] * n_components,
'full': self.covars['full'],
}
def test_bad_covariance_type(self):
hmm.GaussianHMM(20, self.covariance_type)
self.assertRaises(ValueError, hmm.GaussianHMM, 20,
'badcovariance_type')
def test_score_samples_and_decode(self):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.means_ = self.means
h.covars_ = self.covars[self.covariance_type]
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * h.means_
gaussidx = np.repeat(np.arange(self.n_components), 5)
nobs = len(gaussidx)
obs = self.prng.randn(nobs, self.n_features) + h.means_[gaussidx]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, gaussidx)
def test_sample(self, n=1000):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * self.means
h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
h.startprob_ = self.startprob
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmc', n_iter=5, verbose=False, **kwargs):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.means_ = 20 * self.means
h.covars_ = self.covars[self.covariance_type]
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > -0.8,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, -0.8, self.covariance_type, trainll))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
def test_fit_with_length_one_signal(self):
obs = [self.prng.rand(10, self.n_features),
self.prng.rand(8, self.n_features),
self.prng.rand(1, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: zero-size array to reduction operation maximum which
# has no identity
h.fit(obs)
def test_fit_with_priors(self, params='stmc', n_iter=5, verbose=False):
startprob_prior = 10 * self.startprob + 2.0
transmat_prior = 10 * self.transmat + 2.0
means_prior = self.means
means_weight = 2.0
covars_weight = 2.0
if self.covariance_type in ('full', 'tied'):
covars_weight += self.n_features
covars_prior = self.covars[self.covariance_type]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.startprob_prior = startprob_prior
h.transmat_ = normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.transmat_prior = transmat_prior
h.means_ = 20 * self.means
h.means_prior = means_prior
h.means_weight = means_weight
h.covars_ = self.covars[self.covariance_type]
h.covars_prior = covars_prior
h.covars_weight = covars_weight
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs[:1])
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test MAP train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
# XXX: Why such a large tolerance?
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_non_ergodic_transmat(self):
startprob = np.array([1, 0, 0, 0, 0])
transmat = np.array([[0.9, 0.1, 0, 0, 0],
[0, 0.9, 0.1, 0, 0],
[0, 0, 0.9, 0.1, 0],
[0, 0, 0, 0.9, 0.1],
[0, 0, 0, 0, 1.0]])
h = hmm.GaussianHMM(n_components=5,
covariance_type='full', startprob=startprob,
transmat=transmat, n_iter=100, init_params='st')
h.means_ = np.zeros((5, 10))
h.covars_ = np.tile(np.identity(10), (5, 1, 1))
obs = [h.sample(10)[0] for _ in range(10)]
h.fit(obs=obs)
class TestGaussianHMMWithSphericalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'spherical'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
"""Using examples from Wikipedia
- http://en.wikipedia.org/wiki/Hidden_Markov_model
- http://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 2 # ('Rainy', 'Sunny')
self.n_symbols = 3 # ('walk', 'shop', 'clean')
self.emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]
self.startprob = [0.6, 0.4]
self.transmat = [[0.7, 0.3], [0.4, 0.6]]
self.h = hmm.MultinomialHMM(self.n_components,
startprob=self.startprob,
transmat=self.transmat)
self.h.emissionprob_ = self.emissionprob
def test_set_emissionprob(self):
h = hmm.MultinomialHMM(self.n_components)
emissionprob = np.array([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])
h.emissionprob = emissionprob
assert np.allclose(emissionprob, h.emissionprob)
def test_wikipedia_viterbi_example(self):
# From http://en.wikipedia.org/wiki/Viterbi_algorithm:
# "This reveals that the observations ['walk', 'shop', 'clean']
# were most likely generated by states ['Sunny', 'Rainy',
# 'Rainy'], with probability 0.01344."
observations = [0, 1, 2]
logprob, state_sequence = self.h.decode(observations)
self.assertAlmostEqual(np.exp(logprob), 0.01344)
assert_array_equal(state_sequence, [1, 0, 0])
def test_decode_map_algorithm(self):
observations = [0, 1, 2]
h = hmm.MultinomialHMM(self.n_components, startprob=self.startprob,
transmat=self.transmat, algorithm="map",)
h.emissionprob_ = self.emissionprob
logprob, state_sequence = h.decode(observations)
assert_array_equal(state_sequence, [1, 0, 0])
def test_predict(self):
observations = [0, 1, 2]
state_sequence = self.h.predict(observations)
posteriors = self.h.predict_proba(observations)
assert_array_equal(state_sequence, [1, 0, 0])
assert_array_almost_equal(posteriors, [
[0.23170303, 0.76829697],
[0.62406281, 0.37593719],
[0.86397706, 0.13602294],
])
def test_attributes(self):
h = hmm.MultinomialHMM(self.n_components)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, setattr, h, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, setattr, h, 'startprob_', [])
self.assertRaises(ValueError, setattr, h, 'startprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, setattr, h, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, setattr, h, 'transmat_', [])
self.assertRaises(ValueError, setattr, h, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
h.emissionprob_ = self.emissionprob
assert_array_almost_equal(h.emissionprob_, self.emissionprob)
self.assertRaises(ValueError, setattr, h, 'emissionprob_', [])
self.assertRaises(ValueError, setattr, h, 'emissionprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
self.assertEqual(h.n_symbols, self.n_symbols)
def test_score_samples(self):
idx = np.repeat(np.arange(self.n_components), 10)
nobs = len(idx)
obs = [int(x) for x in np.floor(self.prng.rand(nobs) * self.n_symbols)]
ll, posteriors = self.h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
def test_sample(self, n=1000):
samples = self.h.sample(n)[0]
self.assertEqual(len(samples), n)
self.assertEqual(len(np.unique(samples)), self.n_symbols)
def test_fit(self, params='ste', n_iter=5, verbose=False, **kwargs):
h = self.h
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.startprob_ = normalize(self.prng.rand(self.n_components))
h.transmat_ = normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.emissionprob_ = normalize(
self.prng.rand(self.n_components, self.n_symbols), axis=1)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def test_fit_emissionprob(self):
self.test_fit('e')
def test_fit_with_init(self, params='ste', n_iter=5, verbose=False,
**kwargs):
h = self.h
learner = hmm.MultinomialHMM(self.n_components)
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
        # use the _init method to initialize parameters
learner._init(train_obs, params)
trainll = train_hmm_and_keep_track_of_log_likelihood(
learner, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print()
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def test__check_input_symbols(self):
self.assertTrue(self.h._check_input_symbols([[0, 0, 2, 1, 3, 1, 1]]))
self.assertFalse(self.h._check_input_symbols([[0, 0, 3, 5, 10]]))
self.assertFalse(self.h._check_input_symbols([[0]]))
self.assertFalse(self.h._check_input_symbols([[0., 2., 1., 3.]]))
self.assertFalse(self.h._check_input_symbols([[0, 0, -2, 1, 3, 1, 1]]))
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
prng = check_random_state(prng)
g = mixture.GMM(n_mix, covariance_type=covariance_type)
g.means_ = prng.randint(-20, 20, (n_mix, n_features))
mincv = 0.1
g.covars_ = {
'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features)),
'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
'full': np.array(
[make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features) for x in range(n_mix)])
}[covariance_type]
g.weights_ = normalize(prng.rand(n_mix))
return g
class GMMHMMBaseTester(object):
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 3
self.n_mix = 2
self.n_features = 2
self.covariance_type = 'diag'
self.startprob = self.prng.rand(self.n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = self.prng.rand(self.n_components, self.n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, self.n_components))
self.gmms_ = []
for state in range(self.n_components):
self.gmms_.append(create_random_gmm(
self.n_mix, self.n_features, self.covariance_type,
prng=self.prng))
def test_attributes(self):
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, setattr, h, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, setattr, h, 'startprob_', [])
self.assertRaises(ValueError, setattr, h, 'startprob_',
np.zeros((self.n_components - 2, self.n_features)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, setattr, h, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, setattr, h, 'transmat_', [])
self.assertRaises(ValueError, setattr, h, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
def test_score_samples_and_decode(self):
h = hmm.GMMHMM(self.n_components, gmms=self.gmms_)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
for g in h.gmms_:
g.means_ *= 20
refstateseq = np.repeat(np.arange(self.n_components), 5)
nobs = len(refstateseq)
obs = [h.gmms_[x].sample(1).flatten() for x in refstateseq]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, refstateseq)
def test_sample(self, n=1000):
h = hmm.GMMHMM(self.n_components, self.covariance_type,
startprob=self.startprob, transmat=self.transmat,
gmms=self.gmms_)
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
h.startprob_ = self.startprob
h.transmat_ = normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.gmms_ = self.gmms_
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10, random_state=self.prng)[0]
for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
h.transmat_ = normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.startprob_ = normalize(self.prng.rand(self.n_components))
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
# XXX: this test appears to check that training log likelihood should
# never be decreasing (up to a tolerance of 0.5, why?) but this is not
# the case when the seed changes.
raise SkipTest("Unstable test: trainll is not always increasing "
"depending on seed")
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
class TestGMMHMMWithDiagCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'diag'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
def test_fit_means(self):
self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'full'
| bsd-3-clause |
leggitta/mne-python | examples/time_frequency/plot_source_space_time_frequency.py | 19 | 2314 | """
===================================================
Compute induced power in the source space with dSPM
===================================================
Returns STC files ie source estimates of induced power
for different bands in the source space. The inverse method
is linear based on dSPM inverse operator.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_band_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax, event_id = -0.2, 0.5, 1
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# pick MEG channels (EOG is included for artifact rejection)
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
events = events[:10] # take 10 events to keep the computation time low
# Use linear detrend to reduce any edge artifacts
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
preload=True, detrend=1)
# Compute a source estimate per frequency band
bands = dict(alpha=[9, 11], beta=[18, 22])
stcs = source_band_induced_power(epochs, inverse_operator, bands, n_cycles=2,
use_fft=False, n_jobs=1)
for b, stc in stcs.items():
stc.save('induced_power_%s' % b)
###############################################################################
# plot mean power
plt.plot(stcs['alpha'].times, stcs['alpha'].data.mean(axis=0), label='Alpha')
plt.plot(stcs['beta'].times, stcs['beta'].data.mean(axis=0), label='Beta')
plt.xlabel('Time (ms)')
plt.ylabel('Power')
plt.legend()
plt.title('Mean source induced power')
plt.show()
| bsd-3-clause |
RPGOne/Skynet | pactools-master/pactools/comodulogram.py | 1 | 34259 | import warnings
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d, interp2d
from .dar_model.base_dar import BaseDAR
from .dar_model.dar import DAR
from .dar_model.preprocess import multiple_extract_driver
from .utils.progress_bar import ProgressBar
from .utils.spectrum import Bicoherence, Coherence
from .utils.maths import norm, argmax_2d
from .utils.validation import check_array, check_random_state
from .utils.validation import check_consistent_shape
from .utils.viz import add_colorbar
from .bandpass_filter import multiple_band_pass
from .mne_api import MaskIterator
N_BINS_TORT = 18
STANDARD_PAC_METRICS = ['ozkurt', 'canolty', 'tort', 'penny', 'vanwijk']
DAR_BASED_PAC_METRICS = ['duprelatour', ]
COHERENCE_PAC_METRICS = ['jiang', 'colgin']
BICOHERENCE_PAC_METRICS = ['sigl', 'nagashima', 'hagihira', 'bispectrum']
ALL_PAC_METRICS = (STANDARD_PAC_METRICS + DAR_BASED_PAC_METRICS +
COHERENCE_PAC_METRICS + BICOHERENCE_PAC_METRICS)
class Comodulogram(object):
"""An object to compute the comodulogram for phase-amplitude coupling.
Parameters
----------
fs : float
Sampling frequency
low_fq_range : array or list
List of filtering frequencies (phase signal)
high_fq_range : array or list or 'auto'
List of filtering frequencies (amplitude signal)
If 'auto', it uses np.linspace(max(low_fq_range), fs / 2.0, 40).
low_fq_width : float
Bandwidth of the band-pass filter (phase signal)
high_fq_width : float or 'auto'
Bandwidth of the band-pass filter (amplitude signal)
If 'auto', it uses 2 * max(low_fq_range).
method : string or DAR instance
Modulation index method:
- String in ('ozkurt', 'canolty', 'tort', 'penny', ), for a PAC
estimation based on filtering and using the Hilbert transform.
- String in ('vanwijk', ) for a joint AAC and PAC estimation
based on filtering and using the Hilbert transform.
- String in ('sigl', 'nagashima', 'hagihira', 'bispectrum', ), for
a PAC estimation based on the bicoherence.
- String in ('colgin', ) for a PAC estimation
and in ('jiang', ) for a PAC directionality estimation,
based on filtering and computing coherence.
- String in ('duprelatour', ) or a DAR instance, for a PAC estimation
based on a driven autoregressive model.
n_surrogates : int
Number of surrogates computed for the z-score
If n_surrogates <= 1, the z-score is not computed.
vmin, vmax : float or None
If not None, it define the min/max value of the plot.
progress_bar : boolean
If True, a progress bar is shown in stdout.
ax_special : matplotlib.axes.Axes or None
If not None, a special figure is drawn on it, depending on
the PAC method used.
minimum_shift : float
Minimum time shift (in sec) for the surrogate analysis.
random_state : None, int or np.random.RandomState instance
Seed or random number generator for the surrogate analysis.
coherence_params : dict
        Parameters for methods based on coherence or bicoherence.
        May contain:
        - block_length : int
            Block length
        - fft_length : int or None
            Length of the FFT
        - step : int or None
            Step between two blocks
If the dictionary is empty, default values will be applied based on
fs and low_fq_width, with 0.5 overlap windows and no zero-padding.
extract_params : dict
        Parameters for the driver extraction used by DAR models
low_fq_width_2 : float
Bandwidth of the band-pass filters centered on low_fq_range, for
the amplitude signal. Used only with 'vanwijk' method.
"""
def __init__(self, fs, low_fq_range, low_fq_width=2., high_fq_range='auto',
high_fq_width='auto', method='tort', n_surrogates=0,
vmin=None, vmax=None, progress_bar=True, ax_special=None,
minimum_shift=1.0, random_state=None, coherence_params=dict(),
extract_params=dict(), low_fq_width_2=4.0):
self.fs = fs
self.low_fq_range = low_fq_range
self.low_fq_width = low_fq_width
self.high_fq_range = high_fq_range
self.high_fq_width = high_fq_width
self.method = method
self.n_surrogates = n_surrogates
self.vmin = vmin
self.vmax = vmax
self.progress_bar = progress_bar
self.ax_special = ax_special
self.minimum_shift = minimum_shift
self.random_state = random_state
self.coherence_params = coherence_params
self.extract_params = extract_params
self.low_fq_width_2 = low_fq_width_2
def _check_params(self):
if self.high_fq_range == 'auto':
self.high_fq_range = np.linspace(
max(self.low_fq_range), self.fs / 2.0, 40)
if self.high_fq_width == 'auto':
self.high_fq_width = max(self.low_fq_range) * 2
self.random_state = check_random_state(self.random_state)
if isinstance(self.method, str):
self.method = self.method.lower()
self.fs = float(self.fs)
self.low_fq_range = np.atleast_1d(self.low_fq_range)
self.high_fq_range = np.atleast_1d(self.high_fq_range)
if self.ax_special is not None:
assert isinstance(self.ax_special, matplotlib.axes.Axes)
if self.low_fq_range.size > 1:
raise ValueError("ax_special can only be used if low_fq_range "
"contains only one frequency.")
def fit(self, low_sig, high_sig=None, mask=None):
"""Call fit to compute the comodulogram.
Parameters
----------
low_sig : array, shape (n_epochs, n_points)
Input data for the phase signal
high_sig : array or None, shape (n_epochs, n_points)
Input data for the amplitude signal.
If None, we use low_sig for both signals.
mask : array or list of array or None, shape (n_epochs, n_points)
The PAC is only evaluated where the mask is False.
Masking is done after filtering and Hilbert transform.
If the method computes the bicoherence, the mask has to be
unidimensional (n_points, ) and the same mask is applied on all
epochs.
If a list or a MaskIterator is given, the filtering is done only
once and the comodulogram is computed on each mask.
Attributes
----------
comod_ : array, shape (len(low_fq_range), len(high_fq_range))
Comodulogram for each couple of frequencies.
If a list of mask is given, it returns a list of comodulograms.
"""
self._check_params()
low_sig = check_array(low_sig)
high_sig = check_array(high_sig, accept_none=True)
check_consistent_shape(low_sig, high_sig)
# check the masks
multiple_masks = (isinstance(mask, list)
or isinstance(mask, MaskIterator)
or (isinstance(mask, np.ndarray) and mask.ndim == 3))
if not multiple_masks:
mask = [mask]
if not isinstance(mask, MaskIterator):
mask = [check_array(m, dtype=bool, accept_none=True) for m in mask]
n_masks = len(mask)
if self.method in STANDARD_PAC_METRICS:
if high_sig is None:
high_sig = low_sig
if self.progress_bar:
self.progress_bar = ProgressBar(
'comodulogram: %s' % self.method,
max_value=self.low_fq_range.size * n_masks)
# compute a number of band-pass filtered signals
filtered_high = multiple_band_pass(
high_sig, self.fs, self.high_fq_range, self.high_fq_width)
filtered_low = multiple_band_pass(
low_sig, self.fs, self.low_fq_range, self.low_fq_width)
if self.method == 'vanwijk':
filtered_low_2 = multiple_band_pass(
low_sig, self.fs, self.low_fq_range, self.low_fq_width_2)
else:
filtered_low_2 = None
comod_list = []
for this_mask in mask:
comod = _comodulogram(self, filtered_low, filtered_high,
this_mask, filtered_low_2)
comod_list.append(comod)
elif self.method in COHERENCE_PAC_METRICS:
if high_sig is None:
high_sig = low_sig
if self.progress_bar:
self.progress_bar = ProgressBar('coherence: %s' % self.method,
max_value=n_masks)
# compute a number of band-pass filtered signals
filtered_high = multiple_band_pass(
high_sig, self.fs, self.high_fq_range, self.high_fq_width)
comod_list = []
for this_mask in mask:
comod = _coherence(self, low_sig, filtered_high, this_mask)
comod_list.append(comod)
if self.progress_bar:
self.progress_bar.update_with_increment_value(1)
# compute PAC with the bispectrum/bicoherence
elif self.method in BICOHERENCE_PAC_METRICS:
if high_sig is not None:
raise ValueError(
"Impossible to use a bicoherence method (%s) on two "
"signals, please try another method." % self.method)
if self.n_surrogates > 1:
raise NotImplementedError(
"Surrogate analysis with a bicoherence method (%s) "
"is not implemented." % self.method)
if self.progress_bar:
self.progress_bar = ProgressBar('bicoherence: %s' %
self.method, max_value=n_masks)
comod_list = []
for this_mask in mask:
comod = _bicoherence(self, sig=low_sig, mask=this_mask)
comod_list.append(comod)
if self.progress_bar:
self.progress_bar.update_with_increment_value(1)
elif isinstance(self.method,
BaseDAR) or self.method in DAR_BASED_PAC_METRICS:
comod_list = _driven_comodulogram(self, low_sig=low_sig,
high_sig=high_sig, mask=mask)
else:
raise ValueError('unknown method: %s' % self.method)
# remove very small values
for comod in comod_list:
comod[np.abs(comod) < 10 * np.finfo(np.float64).eps] = 0
if not multiple_masks:
self.comod_ = comod_list[0]
else:
self.comod_ = np.array(comod_list)
return self
def plot(self, titles=None, fig=None, axs=None, cmap=None, vmin=None,
vmax=None, unit='', cbar=True, label=True, contours=None,
tight_layout=True):
"""
Plot one or more comodulograms.
titles : list of string or None
List of titles for each comodulogram
axs : list or array of matplotlib.axes.Axes
Axes where the comodulograms are drawn. If None, a new figure is
created. Typical use is: fig, axs = plt.subplots(3, 4)
cmap : colormap or None
Colormap used in the plot. If None, it uses 'viridis' colormap.
vmin, vmax : float or None
If not None, they define the min/max value of the plot, else
they are set to (0, comodulograms.max()).
unit : string (default: '')
Unit of the comodulogram
cbar : boolean
Display colorbar or not
label : boolean
Display labels or not
contours : None or float
If not None, contours will be added around values above contours
value.
tight_layout : boolean
Use tight_layout or not
"""
if self.comod_.ndim == 2:
self.comod_ = self.comod_[None, :, :]
n_comod, n_low, n_high = self.comod_.shape
if axs is None:
n_lines = int(np.sqrt(n_comod))
n_columns = int(np.ceil(n_comod / float(n_lines)))
fig, axs = plt.subplots(n_lines, n_columns,
figsize=(4 * n_columns, 3 * n_lines))
else:
fig = axs[0].figure
axs = np.array(axs).ravel()
if vmin is None and vmax is None:
vmin = min(0, self.comod_.min())
vmax = max(0, self.comod_.max())
if vmin < 0 and vmax > 0:
vmax = max(vmax, -vmin)
vmin = -vmax
        if cmap is None:
            # diverging colormap for signed comodulograms, 'viridis' otherwise
            if vmin is not None and vmin < 0:
                cmap = plt.get_cmap('RdBu_r')
            else:
                cmap = plt.get_cmap('viridis')
n_channels, n_low_fq, n_high_fq = self.comod_.shape
extent = [
self.low_fq_range[0],
self.low_fq_range[-1],
self.high_fq_range[0],
self.high_fq_range[-1],
]
# plot the image
for i in range(n_channels):
cax = axs[i].imshow(self.comod_[i].T, cmap=cmap, vmin=vmin,
vmax=vmax, aspect='auto', origin='lower',
extent=extent, interpolation='none')
if titles is not None:
axs[i].set_title(titles[i], fontsize=12)
if contours is not None:
axs[i].contour(self.comod_[i].T,
levels=np.atleast_1d(contours), colors='w',
origin='lower', extent=extent)
if label:
axs[-1].set_xlabel('Driver frequency (Hz)')
axs[0].set_ylabel('Signal frequency (Hz)')
if tight_layout:
fig.tight_layout()
if cbar:
# plot the colorbar once
ax = axs[0] if len(axs) == 1 else None
add_colorbar(fig, cax, vmin, vmax, unit=unit, ax=ax)
return fig
def get_maximum_pac(self):
"""Get maximum PAC value in a comodulogram.
'low_fq_range' and 'high_fq_range' must be the same than used in the
modulation_index function that computed 'comodulograms'.
Returns
-------
low_fq : float or array, shape (n_comod, )
Low frequency of maximum PAC
high_fq : float or array, shape (n_comod, )
High frequency of maximum PAC
pac_value : float or array, shape (n_comod, )
Maximum PAC value
"""
# only one comodulogram
return_array = True
if self.comod_.ndim == 2:
self.comod_ = self.comod_[None, :, :]
return_array = False
# check that the sizes match
n_comod, n_low, n_high = self.comod_.shape
# compute the maximum of the comodulogram, and get the frequencies
max_pac_value = np.zeros(n_comod)
low_fq = np.zeros(n_comod)
high_fq = np.zeros(n_comod)
for k, comodulogram in enumerate(self.comod_):
i, j = argmax_2d(comodulogram)
max_pac_value[k] = comodulogram[i, j]
low_fq[k] = self.low_fq_range[i]
high_fq[k] = self.high_fq_range[j]
# return arrays or floats
if return_array:
return low_fq, high_fq, max_pac_value
else:
return low_fq[0], high_fq[0], max_pac_value[0]
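# Illustrative usage sketch (not part of the original module); the sampling
# frequency, frequency range and `low_sig` below are assumptions:
#
#   fs = 500.  # Hz
#   estimator = Comodulogram(fs=fs, low_fq_range=np.linspace(1., 10., 20),
#                            low_fq_width=2., method='tort',
#                            progress_bar=False)
#   estimator.fit(low_sig)  # low_sig: array, shape (n_epochs, n_points)
#   estimator.plot()
#   low_fq, high_fq, pac_value = estimator.get_maximum_pac()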
def _comodulogram(estimator, filtered_low, filtered_high, mask,
filtered_low_2):
"""
Helper function to compute the comodulogram.
Used by PAC method in STANDARD_PAC_METRICS.
"""
# The modulation index is only computed where mask is True
if mask is not None:
filtered_low = filtered_low[:, ~mask]
filtered_high = filtered_high[:, ~mask]
if estimator.method == 'vanwijk':
filtered_low_2 = filtered_low_2[:, ~mask]
else:
filtered_low = filtered_low.reshape(filtered_low.shape[0], -1)
filtered_high = filtered_high.reshape(filtered_high.shape[0], -1)
if estimator.method == 'vanwijk':
filtered_low_2 = filtered_low_2.reshape(filtered_low_2.shape[0],
-1)
n_low, n_points = filtered_low.shape
n_high, _ = filtered_high.shape
# phase of the low frequency signals
for i in range(n_low):
filtered_low[i] = np.angle(filtered_low[i])
filtered_low = np.real(filtered_low)
# amplitude of the high frequency signals
filtered_high = np.real(np.abs(filtered_high))
norm_a = np.zeros(n_high)
if estimator.method == 'ozkurt':
for j in range(n_high):
norm_a[j] = norm(filtered_high[j])
# amplitude of the low frequency signals
if estimator.method == 'vanwijk':
for i in range(n_low):
filtered_low_2[i] = np.abs(filtered_low_2[i])
filtered_low_2 = np.real(filtered_low_2)
# Calculate the modulation index for each couple
comod = np.zeros((n_low, n_high))
for i in range(n_low):
# preproces the phase array
if estimator.method == 'tort':
n_bins = N_BINS_TORT
phase_bins = np.linspace(-np.pi, np.pi, n_bins + 1)
# get the indices of the bins to which each value in input belongs
phase_preprocessed = np.digitize(filtered_low[i], phase_bins) - 1
elif estimator.method == 'penny':
phase_preprocessed = np.c_[np.ones_like(filtered_low[i]), np.cos(
filtered_low[i]), np.sin(filtered_low[i])]
elif estimator.method == 'vanwijk':
phase_preprocessed = np.c_[np.ones_like(filtered_low[i]), np.cos(
filtered_low[i]), np.sin(filtered_low[i]), filtered_low_2[i]]
elif estimator.method in ('canolty', 'ozkurt'):
phase_preprocessed = np.exp(1j * filtered_low[i])
else:
raise ValueError('Unknown method %s.' % estimator.method)
for j in range(n_high):
def comod_function(shift):
return _one_modulation_index(
amplitude=filtered_high[j],
phase_preprocessed=phase_preprocessed, norm_a=norm_a[j],
method=estimator.method, shift=shift,
ax_special=estimator.ax_special)
comod[i, j] = _surrogate_analysis(
comod_function, estimator.fs, n_points,
estimator.minimum_shift, estimator.random_state,
estimator.n_surrogates)
if estimator.progress_bar:
estimator.progress_bar.update_with_increment_value(1)
return comod
def _one_modulation_index(amplitude, phase_preprocessed, norm_a, method, shift,
ax_special):
"""
Compute one modulation index.
Used by PAC method in STANDARD_PAC_METRICS.
"""
# shift for the surrogate analysis
if shift != 0:
phase_preprocessed = np.roll(phase_preprocessed, shift)
# Modulation index as in [Ozkurt & al 2011]
if method == 'ozkurt':
MI = np.abs(np.mean(amplitude * phase_preprocessed))
MI *= np.sqrt(amplitude.size) / norm_a
# Generalized linear models as in [Penny & al 2008] or [van Wijk & al 2015]
elif method in ('penny', 'vanwijk'):
# solve a linear regression problem:
# amplitude = np.dot(phase_preprocessed) * beta
PtP = np.dot(phase_preprocessed.T, phase_preprocessed)
PtA = np.dot(phase_preprocessed.T, amplitude[:, None])
beta = np.linalg.solve(PtP, PtA)
residual = amplitude - np.dot(phase_preprocessed, beta).ravel()
variance_amplitude = np.var(amplitude)
variance_residual = np.var(residual)
MI = (variance_amplitude - variance_residual) / variance_amplitude
# Modulation index as in [Canolty & al 2006]
elif method == 'canolty':
z_array = amplitude * phase_preprocessed
MI = np.abs(np.mean(z_array))
if ax_special is not None and shift == 0:
ax_special.plot(np.real(z_array), np.imag(z_array))
ax_special.set_ylabel('Imaginary part of z(t)')
ax_special.set_xlabel('Real part of z(t)')
ax_special.set_title("Canolty's modulation index: %.3f" % MI)
ax_special.grid('on')
# Modulation index as in [Tort & al 2010]
elif method == 'tort':
# mean amplitude distribution along phase bins
n_bins = N_BINS_TORT
amplitude_dist = np.ones(n_bins) # default is 1 to avoid log(0)
for b in np.unique(phase_preprocessed):
selection = amplitude[phase_preprocessed == b]
amplitude_dist[b] = np.mean(selection)
# Kullback-Leibler divergence of the distribution vs uniform
amplitude_dist /= np.sum(amplitude_dist)
divergence_kl = np.sum(amplitude_dist *
np.log(amplitude_dist * n_bins))
MI = divergence_kl / np.log(n_bins)
if ax_special is not None and shift == 0:
phase_bins = np.linspace(-np.pi, np.pi, n_bins + 1)
phase_bins = 0.5 * (phase_bins[:-1] + phase_bins[1:]) / np.pi * 180
ax_special.plot(phase_bins, amplitude_dist, '.-')
ax_special.plot(phase_bins, np.ones(n_bins) / n_bins, '--')
ax_special.set_ylim((0, 2. / n_bins))
ax_special.set_xlim((-180, 180))
ax_special.set_ylabel('Normalized mean amplitude')
ax_special.set_xlabel('Phase (in degree)')
ax_special.set_title("Tort's modulation index: %.3f" % MI)
else:
raise ValueError("Unknown method: %s" % (method, ))
return MI
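# Sanity-check sketch for the Tort index above (not part of the original
# module): a perfectly uniform amplitude distribution gives MI = 0, while
# concentrating all amplitude in a single bin drives MI towards 1:
#
#   uniform = np.ones(N_BINS_TORT) / N_BINS_TORT
#   np.sum(uniform * np.log(uniform * N_BINS_TORT)) / np.log(N_BINS_TORT)
#   # -> 0.0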
def _same_mask_on_all_epochs(sig, mask, method):
"""
PAC metrics based on coherence or bicoherence,
the same mask is applied on all epochs.
"""
mask = np.squeeze(mask)
if mask.ndim > 1:
warnings.warn("For coherence methods (e.g. %s) the mask has "
"to be unidimensional, and the same mask is "
"applied on all epochs. Got shape %s, so only the "
"first row of the mask is used." % (
method,
mask.shape, ), UserWarning)
mask = mask[0, :]
sig = sig[..., ~mask]
return sig
def _bicoherence(estimator, sig, mask):
"""
Helper function for the comodulogram.
Used by PAC method in BICOHERENCE_PAC_METRICS.
"""
# The modulation index is only computed where mask is True
if mask is not None:
sig = _same_mask_on_all_epochs(sig, mask, estimator.method)
n_epochs, n_points = sig.shape
coherence_params = _define_default_coherence_params(
estimator.fs, estimator.low_fq_width, estimator.method,
**estimator.coherence_params)
model = Bicoherence(**coherence_params)
bicoh = model.fit(sigs=sig, method=estimator.method)
# remove the redundant part
n_freq = bicoh.shape[0]
np.flipud(bicoh)[np.triu_indices(n_freq, 1)] = 0
bicoh[np.triu_indices(n_freq, 1)] = 0
frequencies = np.linspace(0, estimator.fs / 2., n_freq)
comod = _interpolate(frequencies, frequencies, bicoh,
estimator.high_fq_range, estimator.low_fq_range)
return comod
def _define_default_coherence_params(fs, low_fq_width, method, **user_params):
"""
Define default values for Coherence and Bicoherence classes,
if not defined in user_params dictionary.
"""
# the FFT length is chosen to have a frequency resolution of low_fq_width
fft_length = fs / low_fq_width
# but it is faster if it is a power of 2
fft_length = 2 ** int(np.ceil(np.log2(fft_length)))
# smoothing for bicoherence methods
if method in BICOHERENCE_PAC_METRICS:
fft_length /= 4
# not smoothed for because we convolve after
if method == 'jiang':
fft_length *= 2
# the block length is chosen to avoid zero-padding
block_length = fft_length
if 'block_length' not in user_params and 'fft_length' not in user_params:
user_params['block_length'] = block_length
user_params['fft_length'] = fft_length
elif 'block_length' in user_params and 'fft_length' not in user_params:
user_params['fft_length'] = user_params['block_length']
elif 'block_length' not in user_params and 'fft_length' in user_params:
user_params['block_length'] = user_params['fft_length']
if 'fs' not in user_params:
user_params['fs'] = fs
if 'step' not in user_params:
user_params['step'] = None
return user_params
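# Worked example (not part of the original module): with fs=500 Hz and
# low_fq_width=2 Hz, the target resolution gives fs / low_fq_width = 250
# samples, rounded up to the next power of two, and the block length defaults
# to the same value so no zero-padding is needed:
#
#   _define_default_coherence_params(fs=500., low_fq_width=2., method='colgin')
#   # -> {'block_length': 256, 'fft_length': 256, 'fs': 500.0, 'step': None}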
def _coherence(estimator, low_sig, filtered_high, mask):
"""
Helper function to compute the comodulogram.
Used by PAC method in COHERENCE_PAC_METRICS.
"""
if mask is not None:
low_sig = _same_mask_on_all_epochs(low_sig, mask, estimator.method)
filtered_high = _same_mask_on_all_epochs(filtered_high, mask,
estimator.method)
# amplitude of the high frequency signals
filtered_high = np.real(np.abs(filtered_high))
coherence_params = _define_default_coherence_params(
estimator.fs, estimator.low_fq_width, estimator.method,
**estimator.coherence_params)
n_epochs, n_points = low_sig.shape
def comod_function(shift):
return _one_coherence_modulation_index(
estimator.fs, low_sig, filtered_high, estimator.method,
estimator.low_fq_range, coherence_params, shift)
comod = _surrogate_analysis(comod_function, estimator.fs, n_points,
estimator.minimum_shift,
estimator.random_state, estimator.n_surrogates)
return comod
def _one_coherence_modulation_index(fs, low_sig, filtered_high, method,
low_fq_range, coherence_params, shift):
"""
Compute one modulation index.
Used by PAC method in COHERENCE_PAC_METRICS.
"""
if shift != 0:
low_sig = np.roll(low_sig, shift)
# the actual frequency resolution is computed here
delta_freq = fs / coherence_params['fft_length']
model = Coherence(**coherence_params)
coherence = model.fit(low_sig[None, :, :], filtered_high)[0]
n_high, n_freq = coherence.shape
frequencies = np.linspace(0, fs / 2., n_freq)
# Coherence as in [Colgin & al 2009]
if method == 'colgin':
coherence = np.real(np.abs(coherence))
comod = _interpolate(
np.arange(n_high), frequencies, coherence,
np.arange(n_high), low_fq_range)
# Phase slope index as in [Jiang & al 2015]
elif method == 'jiang':
product = coherence[:, 1:] * np.conjugate(coherence[:, :-1])
# we use a kernel of (ker * 2) with respect to the product,
# i.e. a kernel of (ker * 2 +1) with respect to the coherence.
ker = 2
kernel = np.ones(2 * ker) / (2 * ker)
phase_slope_index = np.zeros((n_high, n_freq - (2 * ker)),
dtype=np.complex128)
for i in range(n_high):
phase_slope_index[i] = np.convolve(product[i], kernel, 'valid')
phase_slope_index = np.imag(phase_slope_index)
frequencies = frequencies[ker:-ker]
# transform the phase slope index into an approximated delay
delay = phase_slope_index / (2. * np.pi * delta_freq)
comod = _interpolate(
np.arange(n_high), frequencies, delay,
np.arange(n_high), low_fq_range)
else:
raise ValueError('Unknown method %s' % (method, ))
return comod
def _interpolate(x1, y1, z1, x2, y2):
"""Helper to interpolate in 1d or 2d
We interpolate to get the same shape than with other methods.
"""
if x1.size > 1 and y1.size > 1:
func = interp2d(x1, y1, z1.T, kind='linear', bounds_error=False)
z2 = func(x2, y2)
elif x1.size == 1 and y1.size > 1:
func = interp1d(y1, z1.ravel(), kind='linear', bounds_error=False)
z2 = func(y2)
elif y1.size == 1 and x1.size > 1:
func = interp1d(x1, z1.ravel(), kind='linear', bounds_error=False)
z2 = func(x2)
else:
raise ValueError("Can't interpolate a scalar.")
    # interp2d is not intuitive and returns this shape:
z2.shape = (y2.size, x2.size)
return z2
def _driven_comodulogram(estimator, low_sig, high_sig, mask):
"""
Helper function for the comodulogram.
Used by PAC method in DAR_BASED_PAC_METRICS.
"""
model = estimator.method
if model == 'duprelatour':
model = DAR(ordar=10, ordriv=1)
sigdriv_imag = None
if high_sig is None:
sigs = low_sig
else:
# hack to call only once extract
high_sig = np.atleast_2d(high_sig)
sigs = np.r_[low_sig, high_sig]
n_epochs = low_sig.shape[0]
extract_complex = estimator.extract_params.get('extract_complex', True)
comod_list = None
if estimator.progress_bar:
bar = ProgressBar(max_value=len(estimator.low_fq_range) * len(mask),
title='comodulogram: %s' %
model.get_title(name=True))
for j, filtered_signals in enumerate(
multiple_extract_driver(
sigs=sigs, fs=estimator.fs, bandwidth=estimator.low_fq_width,
frequency_range=estimator.low_fq_range, random_state=estimator.
random_state, **estimator.extract_params)):
if extract_complex:
filtered_low, filtered_high, filtered_low_imag = filtered_signals
else:
filtered_low, filtered_high = filtered_signals
if high_sig is None:
sigin = np.array(filtered_high)
sigdriv = np.array(filtered_low)
if extract_complex:
sigdriv_imag = np.array(filtered_low_imag)
else:
sigin = np.array(filtered_high[n_epochs:])
sigdriv = np.array(filtered_low[:n_epochs])
if extract_complex:
sigdriv_imag = np.array(filtered_low_imag[:n_epochs])
sigin /= np.std(sigin)
n_epochs, n_points = sigdriv.shape
for i_mask, this_mask in enumerate(mask):
def comod_function(shift):
return _one_driven_modulation_index(
estimator.fs, sigin, sigdriv, sigdriv_imag, model,
this_mask, estimator.high_fq_range, estimator.ax_special,
shift)
comod = _surrogate_analysis(comod_function, estimator.fs, n_points,
estimator.minimum_shift,
estimator.random_state,
estimator.n_surrogates)
# initialize the comodulogram arrays
if comod_list is None:
comod_list = []
for _ in range(len(mask)):
comod_list.append(
np.zeros((estimator.low_fq_range.size, comod.size)))
comod_list[i_mask][j, :] = comod
if estimator.progress_bar:
bar.update_with_increment_value(1)
return comod_list
def _one_driven_modulation_index(fs, sigin, sigdriv, sigdriv_imag, model, mask,
high_fq_range, ax_special, shift):
"""
Compute one modulation index.
Used by PAC method in DAR_BASED_PAC_METRICS.
"""
# shift for the surrogate analysis
if shift != 0:
sigdriv = np.roll(sigdriv, shift)
# fit the model DAR on the data
model.fit(fs=fs, sigin=sigin, sigdriv=sigdriv, sigdriv_imag=sigdriv_imag,
train_mask=mask)
# get PSD difference
spec, _, _, _ = model._amplitude_frequency()
# KL divergence for each phase, as in [Tort & al 2010]
n_freq, n_phases = spec.shape
spec = 10 ** (spec / 20)
spec = spec / np.sum(spec, axis=1)[:, None]
spec_diff = np.sum(spec * np.log(spec * n_phases), axis=1)
spec_diff /= np.log(n_phases)
# crop the spectrum to high_fq_range
frequencies = np.linspace(0, fs // 2, spec_diff.size)
spec_diff = np.interp(high_fq_range, frequencies, spec_diff)
if ax_special is not None and shift == 0:
model.plot(frange=[high_fq_range[0], high_fq_range[-1]], ax=ax_special)
return spec_diff
def _get_shifts(random_state, n_points, minimum_shift, fs, n_iterations):
""" Compute the shifts for the surrogate analysis"""
n_minimum_shift = max(1, int(fs * minimum_shift))
# shift at least minimum_shift seconds, i.e. n_minimum_shift points
if n_iterations > 1:
if n_points - n_minimum_shift < n_minimum_shift:
raise ValueError("The minimum shift is longer than half the "
"visible data.")
shifts = random_state.randint(
n_minimum_shift, n_points - n_minimum_shift, size=n_iterations)
else:
shifts = np.array([0])
# the first has no shift since this is for the initial computation
shifts[0] = 0
return shifts
def _surrogate_analysis(comod_function, fs, n_points, minimum_shift,
random_state, n_surrogates):
"""Call the comod function for several random time shifts,
then compute the z-score of the result distribution."""
# number of surrogates MIs
n_iterations = max(1, 1 + n_surrogates)
# pre compute all the random time shifts
shifts = _get_shifts(random_state, n_points, minimum_shift, fs,
n_iterations)
comod_list = []
for s, shift in enumerate(shifts):
comod_list.append(comod_function(shift))
comod_list = np.array(comod_list)
# the first has no shift
comod = comod_list[0, ...]
# here we compute the z-score
if n_iterations > 2:
comod -= np.mean(comod_list[1:, ...], axis=0)
comod /= np.std(comod_list[1:, ...], axis=0)
return comod
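# Sketch of the z-score computed above (not part of the original module): the
# comodulogram obtained with shift == 0 is standardized element-wise against
# the surrogate distribution:
#
#   raw = comod_list[0]
#   z = (raw - comod_list[1:].mean(axis=0)) / comod_list[1:].std(axis=0)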
| bsd-3-clause |
buguen/pylayers | pylayers/simul/examples/ex_simulem_fur.py | 3 | 1157 | from pylayers.simul.simulem import *
from pylayers.signal.bsignal import *
from pylayers.measures.mesuwb import *
import matplotlib.pyplot as plt
from pylayers.gis.layout import *
#M=UWBMesure(173)
M=UWBMesure(13)
#M=UWBMesure(1)
cir=TUsignal()
cirf=TUsignal()
#cir.readcir("where2cir-tx001-rx145.mat","Tx001")
#cirf.readcir("where2-furcir-tx001-rx145.mat","Tx001")
cir.readcir("where2cir-tx002-rx012.mat","Tx002")
#cirf.readcir("where2-furcir-tx002-rx012.mat","Tx002")
#cir.readcir("where2cir-tx001-rx001.mat","Tx001")
#cirf.readcir("where2-furcir-tx001-rx001.mat","Tx001")
plt.ion()
fig = plt.figure()
fig.subplots_adjust(hspace=0.5)
ax1 = fig.add_subplot(411,title="points and layout")
L=Layout()
L.load('siradel-cut-fur.ini')
#L.build()
L.showGs(fig=fig,ax=ax1)
ax1.plot(M.tx[0],M.tx[1],'or')
#ax1.plot(M.rx[1][0],M.rx[1][1],'ob')
ax1.plot(M.rx[2][0],M.rx[2][1],'ob')
ax2 = fig.add_subplot(412,title="Measurement")
M.tdd.ch2.plot()
#ax3 = fig.add_subplot(413,title="Simulation with furniture",sharex=ax2,sharey=ax2)
#cirf.plot(col='red')
ax4 = fig.add_subplot(414,title="Simulation",sharex=ax2,sharey=ax2)
cir.plot(col='blue')
plt.show()
| lgpl-3.0 |
arnold-jr/sem-classify | semclassify/plots.py | 1 | 4028 | import matplotlib
import seaborn as sns
matplotlib.rcParams['savefig.dpi'] = 2 * matplotlib.rcParams['savefig.dpi']
matplotlib.rc('text', usetex=True)
from matplotlib import cm
import matplotlib.pyplot as plt
from pylab import imshow, show
import pandas as pd
pd.set_option('expand_frame_repr', False)
import numpy as np
from collections import OrderedDict
from skimage import io
from sklearn.metrics import confusion_matrix
from semclassify.helpers import stopwatch
class PaletteController():
labels = ['None','POR','HYD','QS','ILL','BFS','OPC','FAF','FAF_0','FAF_1','FAF_2']
numbers = range(0,len(labels)+1)
colors = ["#ffffff","#000000","#ddccff", "#0000ff", "#ff00ff", "#00ff00", "#00ffff", "#ff0000",
"#ff3333","#ff6666","#ff9999"]
rgb = [(int(s[1:3],16),int(s[3:5],16),int(s[5:7],16)) for s in colors]
label_num = OrderedDict(zip(labels,numbers))
num_label = OrderedDict(zip(numbers,labels))
label_color = OrderedDict(zip(labels,colors))
num_color = OrderedDict(zip(numbers,colors))
label_rgb = OrderedDict(zip(labels,rgb))
num_rgb = OrderedDict(zip(numbers,rgb))
cmap = matplotlib.colors.ListedColormap(colors,"mymap",len(colors))
cmap.set_bad('w',0)
cpal = sns.color_palette(colors)
def show_palette(self):
sns.palplot(self.cpal,)
if False:
ax = plt.gca()
ax.annotate('FAF', xy=(2,0), xytext=(2,0),
horizontalalignment="center",
verticalalignment="middle",
color="white",
fontsize=16,
)
plt.show()
def print_palette_info(self):
for d in (self.label_num, self.num_label, self.label_color):
for t in d.items():
print("%s -> %s" % t)
def plot_confusion_matrix(y_true,
y_pred,
title="Normalized confusion matrix",
cmap=plt.cm.Blues,
label_encoder=None):
""" Plots the confusion matrix for a set of labels and predictions
:param y_true: the matrix of integer class labels
:param y_pred: the array of predicted classes
:param title: plot title
:param cmap: matplotlib colormap
:return None
"""
cm = confusion_matrix(y_true, y_pred)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
labels = np.unique(y_true)
print(labels)
if label_encoder is not None:
labels = label_encoder.inverse_transform(labels)
print(labels)
df_cm = pd.DataFrame(cm_normalized, columns=labels, index=labels)
print('Normalized confusion matrix')
print(df_cm)
plt.figure()
plt.imshow(df_cm.as_matrix(),
interpolation='nearest',
cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(df_cm.index))
plt.xticks(tick_marks, df_cm.columns, rotation=0)
plt.yticks(tick_marks, df_cm.index)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
def plot_labeled_image(bse_image, label_image, title="./output/classified.png"):
""" Plots a BSE image above a label image
:param bse_image: np array of BSE image values
:param label_image: np array of label values"""
with sns.axes_style(style='dark'):
fig, axes = plt.subplots(2, 1, figsize=(3.25, 5.5))
axes[0].imshow(bse_image, cmap="gray")
plt.setp(axes[0].get_yticklabels(), visible=False)
plt.setp(axes[0].get_xticklabels(), visible=False)
axes[1].imshow(label_image)
plt.setp(axes[1].get_yticklabels(), visible=False)
plt.setp(axes[1].get_xticklabels(), visible=False)
fig.tight_layout()
# fig.savefig(title, format='png', pad_inches=0.0,)
plt.show()
if __name__ == "__main__":
# p = PaletteController()
# p.show_palette()
# p.print_palette_info()
# plt.show()
plot_confusion_matrix([0, 0, 1, 1, 2], [0, 0, 0, 1, 1])
# im = io.imread("/Users/joshuaarnold/Documents/Papers/VU_SEM/analysis/"
# "SEM-EDX-DATA/BFS/soi_001/TSV-TIFF/BSE.tif", flatten=True)
# plot_labeled_image(im, im)
| mit |
ClimbsRocks/scikit-learn | sklearn/tests/test_multioutput.py | 39 | 6609 | import numpy as np
import scipy.sparse as sp
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
def test_multi_target_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
for n in range(3):
rgr = GradientBoostingRegressor(random_state=0)
rgr.fit(X_train, y_train[:, n])
references[:,n] = rgr.predict(X_test)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X_train, y_train)
y_pred = rgr.predict(X_test)
assert_almost_equal(references, y_pred)
def test_multi_target_regression_one_target():
# Test multi target regression raises
X, y = datasets.make_regression(n_targets=1)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
assert_raises(ValueError, rgr.fit, X_train, y_train)
def test_multi_target_sparse_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
rgr = MultiOutputRegressor(Lasso(random_state=0))
rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))
rgr.fit(X_train, y_train)
rgr_sparse.fit(sparse(X_train), y_train)
assert_almost_equal(rgr.predict(X_test), rgr_sparse.predict(sparse(X_test)))
def test_multi_target_sample_weights_api():
X = [[1,2,3], [4,5,6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [0.8, 0.6]
rgr = MultiOutputRegressor(Lasso())
assert_raises_regex(ValueError, "does not support sample weights",
rgr.fit, X, y, w)
# no exception should be raised if the base estimator supports weights
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y, w)
def test_multi_target_sample_weights():
# weighted regressor
Xw = [[1,2,3], [4,5,6]]
yw = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1,2,3], [1,2,3], [4,5,6]]
y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y)
X_test = [[1.5,2.5,3.5], [3.5,4.5,5.5]]
assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
# Import the data
iris = datasets.load_iris()
# create multiple targets by randomly shuffling and concatenating y.
X = iris.data
y1 = iris.target
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
n_classes = len(np.unique(y1))
def test_multi_output_classification():
# test if multi_target initializes correctly with base estimator and fit
    # assert predictions work as expected for predict, predict_proba and score
forest = RandomForestClassifier(n_estimators=10, random_state=1)
multi_target_forest = MultiOutputClassifier(forest)
# train the multi_target_forest and also get the predictions.
multi_target_forest.fit(X, y)
predictions = multi_target_forest.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
predict_proba = multi_target_forest.predict_proba(X)
assert_equal((n_samples, n_classes, n_outputs), predict_proba.shape)
assert_array_equal(np.argmax(predict_proba, axis=1), predictions)
# train the forest with each column and assert that predictions are equal
for i in range(3):
forest_ = clone(forest) # create a clone with the same state
forest_.fit(X, y[:, i])
assert_equal(list(forest_.predict(X)), list(predictions[:, i]))
assert_array_equal(list(forest_.predict_proba(X)),
list(predict_proba[:, :, i]))
def test_multiclass_multioutput_estimator():
# test to check meta of meta estimators
svc = LinearSVC(random_state=0)
multi_class_svc = OneVsRestClassifier(svc)
multi_target_svc = MultiOutputClassifier(multi_class_svc)
multi_target_svc.fit(X, y)
predictions = multi_target_svc.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
# train the forest with each column and assert that predictions are equal
for i in range(3):
multi_class_svc_ = clone(multi_class_svc) # create a clone
multi_class_svc_.fit(X, y[:, i])
assert_equal(list(multi_class_svc_.predict(X)),
list(predictions[:, i]))
def test_multi_output_classification_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3, 2], [2, 3]]
w = np.asarray([2., 1.])
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf_w = MultiOutputClassifier(forest)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3, 2], [3, 2], [2, 3]]
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf = MultiOutputClassifier(forest)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_exceptions():
    # NotFittedError when fit is not done but score, predict and
    # predict_proba are called
moc = MultiOutputClassifier(LinearSVC(random_state=0))
assert_raises(NotFittedError, moc.predict, y)
assert_raises(NotFittedError, moc.predict_proba, y)
assert_raises(NotFittedError, moc.score, X, y)
# ValueError when number of outputs is different
# for fit and score
y_new = np.column_stack((y1, y2))
moc.fit(X, y)
assert_raises(ValueError, moc.score, X, y_new)
| bsd-3-clause |
mprhode/malware-prediction-rnn | main.py | 1 | 1306 | import numpy as np
import pandas as pd
from copy import deepcopy
import gc
from col_headers import Header
from experiments import Experiments, Configs
from experiments.useful import timestamped_to_vector, unison_shuffled_copies, extract_neg
# Load data
headers = Header()
c = headers.classification_col
v = headers.vector_col
data = pd.read_csv("data.csv")
test = data[data["test_set"] == True]
train = data[data["test_set"] == False]
train = train.as_matrix(columns=headers.training_headers)
test = test.as_matrix(columns=headers.training_headers)
x_test, y_test = timestamped_to_vector(test, vector_col=v, time_start=0, classification_col=c)
x_train, y_train = timestamped_to_vector(train, vector_col=v, time_start=0, classification_col=c)
# Random search with thresholding
rand_params = Configs.get_all()
expt = Experiments.Experiment(rand_params, search_algorithm="random",
data=(x_train, y_train), folds=10, folder_name="random_search_reults",
thresholding=True, threshold=0.5)
# parameter configurations
A_B_C = Configs.get_A_B_C
# Ensemble model
ensemble_config = Experiments.Ensemble_configurations(list(A_B_C.values()),
x_test=x_test, y_test=y_test,
x_train=x_train, y_train=y_train,
folder_name="test_train_results",
batch_size=64)
ensemble_config.run_experiments() | apache-2.0 |
CKehl/pylearn2 | pylearn2/scripts/datasets/step_through_small_norb.py | 49 | 3123 | #! /usr/bin/env python
"""
A script for sequentially stepping through SmallNORB, viewing each image and
its label.
Intended as a demonstration of how to iterate through NORB images,
and as a way of testing SmallNORB's StereoViewConverter.
If you just want an image viewer, consider
pylearn2/scripts/show_binocular_grayscale_images.py,
which is not specific to SmallNORB.
"""
__author__ = "Matthew Koichi Grimes"
__copyright__ = "Copyright 2010-2014, Universite de Montreal"
__credits__ = __author__
__license__ = "3-clause BSD"
__maintainer__ = __author__
__email__ = "mkg alum mit edu (@..)"
import argparse, pickle, sys
from matplotlib import pyplot
from pylearn2.datasets.norb import SmallNORB
from pylearn2.utils import safe_zip
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Step-through visualizer for SmallNORB dataset")
parser.add_argument("--which_set",
default='train',
required=True,
help=("'train', 'test', or the path to a "
"SmallNORB .pkl file"))
return parser.parse_args()
def load_norb(args):
if args.which_set in ('test', 'train'):
return SmallNORB(args.which_set, True)
else:
norb_file = open(args.which_set)
return pickle.load(norb_file)
args = parse_args()
norb = load_norb(args)
topo_space = norb.view_converter.topo_space # does not include label space
vec_space = norb.get_data_specs()[0].components[0]
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.suptitle("Press space to step through, or 'q' to quit.")
def draw_and_increment(iterator):
"""
Draws the image pair currently pointed at by the iterator,
then increments the iterator.
"""
def draw(batch_pair):
for axis, image_batch in safe_zip(axes, batch_pair):
assert image_batch.shape[0] == 1
grayscale_image = image_batch[0, :, :, 0]
axis.imshow(grayscale_image, cmap='gray')
figure.canvas.draw()
def get_values_and_increment(iterator):
try:
vec_stereo_pair, labels = norb_iter.next()
except StopIteration:
return (None, None)
topo_stereo_pair = vec_space.np_format_as(vec_stereo_pair,
topo_space)
return topo_stereo_pair, labels
batch_pair, labels = get_values_and_increment(norb_iter)
draw(batch_pair)
norb_iter = norb.iterator(mode='sequential',
batch_size=1,
data_specs=norb.get_data_specs())
def on_key_press(event):
if event.key == ' ':
draw_and_increment(norb_iter)
if event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
draw_and_increment(norb_iter)
pyplot.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
rwcarlsen/cyclus.github.com | source/numpydoc/docscrape_sphinx.py | 41 | 9437 | from __future__ import division, absolute_import, print_function
import sys, re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
        # NOTE: signature output is disabled here: the early return below
        # short-circuits the (unreachable) rendering of self['Signature'].
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self):
out = []
if self['Returns']:
out += self._str_field_list('Returns')
out += ['']
for param, param_type, desc in self['Returns']:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
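# Usage sketch (illustrative only): render the numpydoc-style docstring of an
# arbitrary object as Sphinx-flavoured reST. numpy.mean is just a convenient
# target here; any callable or class with a NumPy-format docstring would do.
def _example_render_docstring():
    import numpy as np
    doc = get_doc_object(np.mean, config={'use_plots': False})
    return str(doc)   # reST text with field lists, rubrics, autosummary tables, ...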
| bsd-3-clause |
dsavoiu/kafe2 | kafe2/fit/xy/ensemble.py | 1 | 29429 | try:
import typing # help IDEs with type-hinting inside docstrings
except ImportError:
pass
import numpy as np
import scipy.stats
import six
from .._base import FitEnsembleBase, FitEnsembleException
from ..tools.ensemble import EnsembleVariable, EnsembleVariablePlotter
from .cost import XYCostFunction_Chi2
from .fit import XYFit
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import gridspec as gs
__all__ = ["XYFitEnsemble"]
def _heuristic_optimal_subplot_grid_size(n_subplots, aspect_ratio_priority=0.5):
    """Heuristically pick a subplot grid (n_rows, n_cols) with n_rows * n_cols >= n_subplots,
    trading off wasted grid cells against a square-ish aspect ratio. Larger values of
    ``aspect_ratio_priority`` favour squarer grids, e.g. 7 subplots yield (2, 4) with the
    default priority but (3, 3) with a priority of 0.8."""
    def f2(s, k):
if n_subplots > s * (s + k):
return 100000
return ((s * (s + k) - n_subplots) ** 2 * (1.0 - aspect_ratio_priority)
+ (float(k) / float(s)) ** 2 * (aspect_ratio_priority))
_optimal_f = np.inf
_optimal_sk = n_subplots, 0
for s in six.moves.range(1, n_subplots):
for k in six.moves.range(0, n_subplots):
_f = f2(s, k)
if _f < _optimal_f:
_optimal_f = _f
_optimal_sk = s, k
s, k = _optimal_sk
return s, s+k
class XYFitEnsembleException(FitEnsembleException):
pass
class XYFitEnsemble(FitEnsembleBase):
"""
Object for generating ensembles of fits to *xy* pseudo-data generated according to the
specified uncertainty model.
After constructing an :py:obj:`~kafe2.fit.XYFitEnsemble` object, an error model should be added
to it. This is done as for :py:obj:`~kafe2.fit.XYFit` objects by using the
:py:meth:`~kafe2.fit.XYFitEnsemble.add_error` or :py:meth:`~kafe2.fit.XYFitEnsemble.add_matrix_error`
methods.
Once an uncertainty model is provided, the fit ensemble can be generated by using the
:py:meth:`~kafe2.fit.XYFitEnsemble.run` method. This method starts by generating a pseudo-dataset in such a way
that the empirical distribution of the data corresponds to the specified uncertainty model. It then
fits the model to the pseudo-data and extracts information from the fit, such as the resulting parameter
values or the value of the cost function at the minimum. This is repeated a large number of times
in order to evaluate the whole ensemble in a statistically meaningful way.
The ensemble result can be visualized by using the :py:meth:`~kafe2.fit.XYFitEnsemble.plot_results` method.
.. TODO Expand section
"""
FIT_TYPE = XYFit
AVAILABLE_STATISTICS = {
'mean': EnsembleVariable.mean,
'mean_error': EnsembleVariable.mean_error,
'std': EnsembleVariable.std,
'skew': EnsembleVariable.skew,
'kurtosis': EnsembleVariable.kurtosis,
'cor_mat': EnsembleVariable.cor_mat,
'cov_mat': EnsembleVariable.cov_mat,
}
_DEFAULT_STATISTICS = {'mean', 'std'}
def __init__(self, n_experiments, x_support, model_function, model_parameters,
cost_function=XYCostFunction_Chi2(axes_to_use='y', errors_to_use='covariance'),
requested_results=None):
"""Construct an :py:obj:`~kafe2.fit.XYFitEnsemble` object.
:param n_experiments: Number of pseudo experiments to perform.
:type n_experiments: int
:param x_support: *x* values to use as support for calculating the "true" model
("true" *x*).
:type x_support: typing.Sequence[float]
:param model_function: The model function. Either a
:py:class:`~kafe2.fit.indexed.XYModelFunction` object or an unwrapped native Python
function.
:type model_function: typing.Callable
:param model_parameters: Parameters of the "true" model
:type model_parameters: typing.Sequence[float]
:param cost_function: The cost function used for the fits. Either a
:py:class:`~kafe2.fit._base.CostFunctionBase`-derived object or an unwrapped native
Python function.
:type cost_function: typing.Callable
:param requested_results: List of result variables to collect for each toy fit. If
:py:obj:`None` it will default to ``('y_pulls', 'parameter_pulls', 'cost')``.
:type requested_results: typing.Sequence[str] or None.
"""
self._n_exp = n_experiments
self._ref_x_data = np.asarray(x_support, dtype=float)
self._model_function = model_function
self._model_parameters = np.asarray(model_parameters)
self._cost_function = cost_function
self._n_par = len(self._model_parameters)
# initialize an `XYFit` object for performing the toy fits
# need some dummy initial data values in order to initialize a Fit object
self._ref_y_data = self._model_function(self._ref_x_data, *self._model_parameters)
# initialize Fit object used for fitting the pseudo-data
self._toy_fit = XYFit(xy_data=[self._ref_x_data, self._ref_y_data],
model_function=self._model_function,
cost_function=self._cost_function)
# set the model parameters of the toy fit to the reference values
self._set_toy_fit_parameters_to_reference()
# get reference quantities (y data, covariance matrices...) from toy fit
self._update_reference_quantities_from_toy_fit()
# store and validate names of requested ensemble variables
self._requested_results = requested_results
if self._requested_results is None:
self._requested_results = self._DEFAULT_RESULTS
else:
# validate list of results requested by user
_unavailable_results = set(self._requested_results) - set(self.AVAILABLE_RESULTS.keys())
if _unavailable_results:
raise ValueError("Requested unavailable result variable(s): %r"
% (_unavailable_results,))
# initialize `EnsembleVariable` objects to store ensembles
self._initialize_ensemble_variables()
def _set_toy_fit_parameters_to_reference(self):
"""set the model parameters of the toy fit to the reference values"""
self._toy_fit._param_model._model_parameters = self._model_parameters
self._toy_fit._param_model._pm_calculation_stale = True
def _generate_pseudodata(self):
"""generate new pseudo-data according to fit error model and commit to data container"""
if not self._toy_fit.has_errors:
raise FitEnsembleException("Cannot generate fit ensemble: no error model specified!")
# -- generate 'x' data
_x_data = self._ref_x_data.copy()
if self._toy_fit.data_container.has_x_errors:
# smear x data according to the total 'x' covariance matrix
# TODO: only gaussian smearing is implemented -> more?
_x_jitter = np.random.multivariate_normal(
np.zeros_like(_x_data),
self._ref_x_cov_mat)
_x_data += _x_jitter
_y_data = self._toy_fit.eval_model_function(x=_x_data,
model_parameters=self._model_parameters)
# smear y data according to the total 'y' covariance matrix
# TODO: only gaussian smearing is implemented -> more?
_y_jitter = np.random.multivariate_normal(
np.zeros_like(_y_data),
self._ref_y_cov_mat)
_y_data += _y_jitter
# update toy fit data container
self._toy_fit.data_container.x = _x_data
self._toy_fit.data_container.y = _y_data
def _gather_results_from_toy_fit(self, i_exp):
for _var_name in self._requested_results:
self._ensemble_variables[_var_name].set_value(index=i_exp, variable_value=self._get_var(_var_name))
def _do_toy_fit(self):
"""run fit with current pseudo-data"""
self._toy_fit.do_fit()
def _get_var(self, var_name):
"""get the value of the result variables for the current fit"""
return self.AVAILABLE_RESULTS[var_name].fget(self)
def _initialize_ensemble_variables(self):
self._ensemble_variables = {}
self._ensemble_variable_plotters = {}
if 'y_pulls' in self._requested_results:
self._ensemble_variables['y_pulls'] = EnsembleVariable(
ensemble_array=np.zeros((self._n_exp, self.n_dat)),
distribution=scipy.stats.norm,
distribution_parameters=dict(loc=0, scale=1)
)
self._ensemble_variable_plotters['y_pulls'] = EnsembleVariablePlotter(
ensemble_variable=self._ensemble_variables['y_pulls'],
value_ranges=(-3, 3),
variable_labels=['Pull $y_{%d}$' % (_i,) for _i in six.moves.range(1, self.n_dat+1)]
)
if 'x_data' in self._requested_results:
self._ensemble_variables['x_data'] = EnsembleVariable(
ensemble_array=np.zeros((self._n_exp, self.n_dat)),
distribution=scipy.stats.norm,
distribution_parameters=dict(loc=self._ref_x_data, scale=self._toy_fit.x_total_error)
)
self._ensemble_variable_plotters['x_data'] = EnsembleVariablePlotter(
ensemble_variable=self._ensemble_variables['x_data'],
value_ranges=np.array([self._ref_x_data - 3 * self._toy_fit.x_total_error,
self._ref_x_data + 3 * self._toy_fit.x_total_error]).T,
variable_labels=['$x_{%d}$' % (_i,) for _i in six.moves.range(1, self.n_dat+1)]
)
if 'y_data' in self._requested_results:
self._ensemble_variables['y_data'] = EnsembleVariable(
ensemble_array=np.zeros((self._n_exp, self.n_dat)),
distribution=scipy.stats.norm,
distribution_parameters=dict(loc=self._ref_y_data, scale=self._ref_projected_xy_err)
)
self._ensemble_variable_plotters['y_data'] = EnsembleVariablePlotter(
ensemble_variable=self._ensemble_variables['y_data'],
value_ranges=np.array([self._ref_y_data-3*self._ref_projected_xy_err,
self._ref_y_data+3*self._ref_projected_xy_err]).T,
variable_labels=['$y_{%d}$' % (_i,) for _i in six.moves.range(1, self.n_dat+1)]
)
if 'y_model' in self._requested_results:
self._ensemble_variables['y_model'] = EnsembleVariable(
ensemble_array=np.zeros((self._n_exp, self.n_dat)),
distribution=scipy.stats.norm,
distribution_parameters=dict(loc=self._ref_y_data, scale=self._ref_projected_xy_err)
)
self._ensemble_variable_plotters['y_model'] = EnsembleVariablePlotter(
ensemble_variable=self._ensemble_variables['y_model'],
value_ranges=np.array([self._ref_y_data-3*self._ref_projected_xy_err,
self._ref_y_data+3*self._ref_projected_xy_err]).T,
variable_labels=['$f(x_{%d})$' % (_i,) for _i in six.moves.range(1, self.n_dat+1)]
)
if 'parameter_pulls' in self._requested_results:
self._ensemble_variables['parameter_pulls'] = EnsembleVariable(
ensemble_array=np.zeros((self._n_exp, self._n_par)),
distribution=scipy.stats.norm,
distribution_parameters=dict(loc=0, scale=1)
)
self._ensemble_variable_plotters['parameter_pulls'] = EnsembleVariablePlotter(
ensemble_variable=self._ensemble_variables['parameter_pulls'],
value_ranges=(-3, 3),
variable_labels=["Pull ${}$".format(_arg_formatter.latex_name)
for _arg_formatter in self._toy_fit._model_function.formatter.arg_formatters]
)
if 'cost' in self._requested_results:
self._ensemble_variables['cost'] = EnsembleVariable(
ensemble_array=np.zeros((self._n_exp,)),
distribution=scipy.stats.chi2, # FIXME: assume chi2 for all cost functions -> change
distribution_parameters=dict(loc=0, df=self.n_df)
)
self._ensemble_variable_plotters['cost'] = EnsembleVariablePlotter(
ensemble_variable=self._ensemble_variables['cost'],
value_ranges=(0, 3*self.n_df),
variable_labels="${}$".format(self._toy_fit._cost_function.formatter.latex_name)
)
def _make_figure_gs(self, figsize=(8, 8), nrows=1, ncols=1,
left=0.1, bottom=0.1,
right=0.9, top=0.9):
"""create a new matplotlib figure with a GridSpec controlling the subplot layout"""
_fig = plt.figure(figsize=figsize) # defaults from matplotlibrc
_gs = gs.GridSpec(nrows=nrows,
ncols=ncols,
left=left,
bottom=bottom,
right=right,
top=top,
wspace=None,
hspace=None,
height_ratios=None)
return _fig, _gs
def _update_reference_quantities_from_toy_fit(self):
self._ref_y_data = self._toy_fit.eval_model_function(x=self._ref_x_data,
model_parameters=self._model_parameters)
self._ref_x_cov_mat = self._toy_fit.x_total_cov_mat
self._ref_y_cov_mat = self._toy_fit.y_total_cov_mat
self._ref_projected_xy_cov_mat = self._toy_fit.total_cov_mat
self._ref_x_err = self._toy_fit.x_total_error
self._ref_y_err = self._toy_fit.y_total_error
self._ref_projected_xy_err = self._toy_fit.total_error
# -- private properties
@property
def _x_data(self):
"""property for ensemble variable 'x_data'"""
return self._toy_fit.x
@property
def _parameter_pulls(self):
"""property for ensemble variable 'parameter_pulls'"""
return (self._toy_fit.parameter_values - self._model_parameters)/self._toy_fit.parameter_errors
@property
def _y_data(self):
"""property for ensemble variable 'y_data'"""
return self._toy_fit.y_data
@property
def _y_model(self):
"""property for ensemble variable 'y_model'"""
return self._toy_fit.y_model
@property
def _y_pulls(self):
"""property for ensemble variable 'y_pulls'"""
return (self._toy_fit.y_data - self._toy_fit.y_model) / self._toy_fit.y_total_error
@property
def _cost(self):
"""property for ensemble variable 'cost'"""
return self._toy_fit.cost_function_value
# -- public properties
@property
def n_exp(self):
"""The number of pseudo-experiments to perform.
:rtype: int
"""
return self._n_exp
@property
def n_par(self):
"""The number of parameters.
:rtype: int
"""
return self._n_par
@property
def n_dat(self):
"""The number of data points used for the fit.
:rtype: int
"""
return self._toy_fit.data_container.size
@property
def n_df(self):
"""The number of degrees of freedom for the fit
:rtype: int
"""
# FIXME: not generally true -> update to handle constrained parameters
# TODO: not applicable for all cost functions -> find a flexible solution
return self.n_dat - self.n_par
# -- public methods
def add_error(self, axis, err_val, name=None, correlation=0, relative=False, reference='data'):
self._toy_fit.add_error(axis=axis, err_val=err_val,
name=name, correlation=correlation, relative=relative,
reference=reference)
self._update_reference_quantities_from_toy_fit() # recompute reference errors
# "inherit" docstring
add_error.__doc__ = XYFit.add_error.__doc__
def add_matrix_error(self, axis, err_matrix, matrix_type, name=None, err_val=None, relative=False, reference='data'):
self._toy_fit.add_matrix_error(axis=axis, err_matrix=err_matrix,
matrix_type=matrix_type, name=name, err_val=err_val,
relative=relative, reference=reference)
self._update_reference_quantities_from_toy_fit() # recompute reference errors
# "inherit" docstring
add_matrix_error.__doc__ = XYFit.add_matrix_error.__doc__
def run(self):
"""Perform the pseudo-experiments. Retrieve and store the requested fit result variables."""
self._set_toy_fit_parameters_to_reference()
self._update_reference_quantities_from_toy_fit()
self._initialize_ensemble_variables()
for _i_exp in six.moves.range(self.n_exp):
self._generate_pseudodata()
self._do_toy_fit()
self._gather_results_from_toy_fit(_i_exp)
def get_results(self, *results):
"""Return a dictionary containing the ensembles of result variables.
:param results: Names of result variables to retrieve. Calling without arguments retrieves
*all* collected results.
:type results: typing.Iterable[str]
:rtype: dict
"""
if not results:
results = self._requested_results
else:
# validate list of results requested by user
            _unavailable_results = set(results) - set(self.AVAILABLE_RESULTS.keys())
if _unavailable_results:
raise ValueError("Requested unavailable result variable(s): %r"
% (_unavailable_results,))
_dict_to_return = dict()
for _result_name in results:
_var = self._ensemble_variables.get(_result_name, None)
if _var is None:
raise FitEnsembleException("Cannot retrieve result '{}': "
"variable not collected!".format(_result_name))
_dict_to_return[_result_name] = _var.values
return _dict_to_return
def get_results_statistics(self, results='all', statistics='all'):
"""Return a dictionary containing statistics (e.g. mean) of the result variables.
:param results: Names of retrieved fit variable for which to return statistics. If
``'all'``, get statistics for all retrieved variables
:type results: typing.Iterable[str] or str
:param statistics: Names of statistics to retrieve for each result variable. If ``'all'``,
get all statistics for each retrieved variable
:type statistics: typing.Iterable[str] or str
:rtype: dict
"""
if results == 'all':
results = self._requested_results
if statistics == 'all':
statistics = self.__class__._DEFAULT_STATISTICS
_dict_to_return = dict()
for _result_name in results:
#_result_array = self._result_array_dicts.get(_result_name, None)
_result_variable = self._ensemble_variables.get(_result_name, None)
if _result_variable is None:
raise FitEnsembleException("Cannot retrieve statistics for result "
"variable '{}': variable not collected!".format(_result_name))
_current_result_dict = _dict_to_return[_result_name] = dict()
# calculate and store statistics
for _stat_name in statistics:
_stat_unbound_method = self.__class__.AVAILABLE_STATISTICS.get(_stat_name, None)
if _stat_unbound_method is None:
raise FitEnsembleException(
"Unknown statistic '%s' requested!" % (_stat_name,))
_stat = _stat_unbound_method.__get__(_result_variable, EnsembleVariable)
_current_result_dict[_stat_name] = _stat
return _dict_to_return
def plot_result_distributions(self, results='all',
show_legend=True):
"""Make plots with histograms of the requested fit variable values across all
pseudo-experiments.
:param results: Names of retrieved fit variable for which to generate plots. If
``'all'``, plots for all retrieved variables will be made.
:type results: typing.Iterable[str] or str
:param show_legend: If a legend is shown on each figure.
:type show_legend: bool
"""
if results == 'all':
results = self._requested_results
else:
# validate list of results requested by user
            _unavailable_results = set(results) - set(self.AVAILABLE_RESULTS.keys())
if _unavailable_results:
raise ValueError("Requested unavailable result variable(s): %r"
% (_unavailable_results,))
for _result_name in results:
_result_variable = self._ensemble_variables.get(_result_name, None)
if _result_variable is None:
raise FitEnsembleException("Cannot plot result for variable '%s': "
"variable not collected!" % (_result_name,))
_result_variable_plotter = self._ensemble_variable_plotters.get(_result_name, None)
if _result_variable_plotter is None:
raise FitEnsembleException("Cannot plot result for variable '%s': "
"no plotter defined!" % (_result_name,))
# -- decide how to lay out plots depending on the result variable dimensionality
if _result_variable.ndim == 0:
# if the ensemble variable is a scalar,
# plot it into a single `Axes` object
_fig, _gs = self._make_figure_gs(figsize=(8, 8), nrows=1, ncols=1)
_ax = plt.subplot(_gs[0, 0])
# call the plotting routine on the axes grid
_plot_result_dict = _result_variable_plotter.plot_hist(_ax)
elif _result_variable.ndim == 1:
# if the ensemble variable is a one-dimensional vector,
# plot each entry into a separate `Axes` object and display
# them in a grid-like layout
_nplots = int(_result_variable.shape[0])
_nrows, _ncols = _heuristic_optimal_subplot_grid_size(_nplots, aspect_ratio_priority=0.8)
_fig, _gs = self._make_figure_gs(figsize=(8, 8), nrows=_nrows, ncols=_ncols)
# create an array 'a' with a[i, j] = [i, j]
_axes_grid = np.dstack((np.meshgrid(np.arange(_nrows), np.arange(_ncols))))
# replace [i, j] by the `Axes` object for _gs[i, j] -> array of `Axes`
_axes_grid = np.apply_along_axis(
lambda irow_icol: plt.subplot(_gs[irow_icol[0], irow_icol[1]]) if irow_icol[0]*_ncols+irow_icol[1] < _nplots else None,
-1, _axes_grid)
# reshape the `Axes` array to match the variable shape
_axes_grid = _axes_grid.T.flatten()[:_result_variable.shape[0]]
# call the plotting routine on the axes grid
_plot_result_dict = _result_variable_plotter.plot_hist(_axes_grid)
elif _result_variable.ndim == 2:
# if the ensemble variable is a two-dimensional vector,
# plot the (i,j)-th entry into a an `Axes` object at the
# (i,j)-th position in a grid
_nrows = _result_variable.shape[0]
_ncols = _result_variable.shape[1]
_fig, _gs = self._make_figure_gs(figsize=(8, 8), nrows=_nrows, ncols=_ncols)
# create an array 'a' with a[i, j] = [i, j]
_axes_grid = np.dstack(reversed(np.meshgrid(np.arange(_nrows), np.arange(_ncols))))
# replace [i, j] by the `Axes` object for _gs[i, j] -> array of `Axes`
_axes_grid = np.apply_along_axis(
lambda irow_icol: plt.subplot(_gs[irow_icol[0], irow_icol[1]]),
-1, _axes_grid)
# do not reshape _axes_grid -> its shape already matches variable shape
# call the plotting routine on the axes grid
_plot_result_dict = _result_variable_plotter.plot_hist(_axes_grid)
else:
# cannot plot variables with 3 or more dimensions...
raise FitEnsembleException("Cannot plot result for variable '%s': variable entry dimensionality "
"too high (%d)!" % (_result_name, _result_variable.ndim))
if show_legend:
_fig.legend(_plot_result_dict['legend_handles'],
_plot_result_dict['legend_labels'], loc='lower center')
# add extra space at figure bottom for legend
_figure_extra_bottom = 0.05 * len(_plot_result_dict['legend_labels'])
else:
# no extra space at figure bottom
_figure_extra_bottom = 0.0
_fig.canvas.set_window_title(_result_name)
_gs.tight_layout(_fig,
pad=0.0, w_pad=0, h_pad=-0.2,
rect=(0.01, 0.02+_figure_extra_bottom, 0.98, 0.98))
return _plot_result_dict
def plot_result_scatter(self, results='all',
show_legend=True):
"""Make scatter plots of the requested fit variable values across all pseudo-experiments.
:param results: Names of retrieved fit variable for which to generate plots. If
``'all'``, plots for all retrieved variables will be made.
:type results: typing.Iterable[str] or str
:param show_legend: If a legend is shown on each figure.
:type show_legend: bool
"""
if results == 'all':
results = self._requested_results
else:
# validate list of results requested by user
            _unavailable_results = set(results) - set(self.AVAILABLE_RESULTS.keys())
if _unavailable_results:
raise ValueError("Requested unavailable result variable(s): %r"
% (_unavailable_results,))
for _result_name in results:
_result_variable = self._ensemble_variables.get(_result_name, None)
if _result_variable is None:
raise FitEnsembleException("Cannot plot result for variable '%s': "
"variable not collected!" % (_result_name,))
_result_variable_plotter = self._ensemble_variable_plotters.get(_result_name, None)
if _result_variable_plotter is None:
raise FitEnsembleException("Cannot plot result for variable '%s': "
"no plotter defined!" % (_result_name,))
# -- decide how to lay out plots depending on the result variable dimensionality
if _result_variable.ndim != 1:
raise ValueError()
# if the ensemble variable is a one-dimensional vector,
# plot each entry into a separate `Axes` object and display
# them in a grid-like layout
_nrows = _ncols = int(_result_variable.shape[0])
if _nrows <= 1:
raise FitEnsembleException("Cannot create scatter plot for result variable '%s': "
"vector has less than two entries!" % (_result_name,))
_fig, _gs = self._make_figure_gs(figsize=(8, 8), nrows=_nrows-1, ncols=_ncols-1)
# create an array 'a' with a[i, j] = [i, j]
_axes_grid = np.dstack((np.meshgrid(np.arange(_nrows), np.arange(_ncols))))
# replace [i, j] by the `Axes` object for _gs[i, j] -> array of `Axes`
_axes_grid = np.apply_along_axis(
lambda irow_icol: plt.subplot(_gs[irow_icol[0] - 1, irow_icol[1]]) if irow_icol[0] > irow_icol[1] else None,
-1, _axes_grid)
# call the plotting routine on the axes grid
_plot_result_dict = _result_variable_plotter.plot_scatter(_axes_grid)
if show_legend:
_fig.legend(_plot_result_dict['legend_handles'],
_plot_result_dict['legend_labels'], loc='lower center')
# add extra space at figure bottom for legend
_figure_extra_bottom = 0.05 * len(_plot_result_dict['legend_labels'])
else:
# no extra space at figure bottom
_figure_extra_bottom = 0.0
_fig.canvas.set_window_title(_result_name)
_gs.tight_layout(_fig,
pad=0.0, w_pad=0, h_pad=-0.2,
rect=(0.01, 0.02+_figure_extra_bottom, 0.98, 0.98))
return _plot_result_dict
AVAILABLE_RESULTS = {
'parameter_pulls': _parameter_pulls,
'x_data': _x_data,
'y_pulls': _y_pulls,
'cost': _cost,
'y_data': _y_data,
'y_model': _y_model,
}
_DEFAULT_RESULTS = {'y_pulls', 'parameter_pulls', 'cost'}
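# Usage sketch (not part of the original module): a minimal toy Monte-Carlo run
# illustrating the workflow described in the XYFitEnsemble docstring. The linear
# model, parameter values and uncertainty below are purely illustrative assumptions.
def _example_xy_fit_ensemble():
    import numpy as np
    def linear_model(x, a=1.0, b=0.0):
        return a * x + b
    ensemble = XYFitEnsemble(n_experiments=50,
                             x_support=np.linspace(0.0, 10.0, 11),
                             model_function=linear_model,
                             model_parameters=[1.5, 0.5])
    ensemble.add_error(axis='y', err_val=0.3)   # uncorrelated 'y' uncertainty
    ensemble.run()
    return ensemble.get_results_statistics(results=['parameter_pulls'])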
| gpl-3.0 |
BU-PyCon/Meeting-1 | Programs/basic_plotting.py | 1 | 1412 | #BiMonBUPyCon - First meeting - 3/22/2015
selection = input('Input plot #: ')
print(type(selection))
#Basic plotting
if selection == '1':
#Example 1: Basic importing and plotting procedures
#-------------------------------------------------------------------------------------
import matplotlib
import matplotlib.pyplot as plt
x = range(6)
plt.plot(x)
plt.show()
#-------------------------------------------------------------------------------------
if selection == '2':
#Example 2: More advanced plot and interactive
#-------------------------------------------------------------------------------------
import matplotlib
import matplotlib.pyplot as plt
plt.ion()
import numpy as np
x = np.arange(9)
plt.plot(x,x**2,color='r',linewidth=2,marker='o', linestyle=':',label='A')
plt.plot(x,3.*x**3, c='b', lw=1.5, ls='--',label='B')
plt.scatter(x,1500.*np.ones(9),c='k',s=100,edgecolor='#FF6600',lw=4,label='C')
plt.legend(loc='best')
plt.xlabel('Time', fontsize='x-large')
plt.ylabel(r'$\beta \; $or$\epsilon$do$\bf{m}$', fontsize='large')
plt.title('AS### Classes - Your choice', fontsize='medium')
plt.xlim(0,10)
plt.ylim(-100,2000)
plt.text(2,1000,'Your Ad Here!',ha='left', va='center',fontsize='x-large')
plt.show()
plt.savefig('/Users/paul/Desktop/figure.png',dpi=300)
#-------------------------------------------------------------------------------------
| mit |
GuessWhoSamFoo/pandas | pandas/tests/test_lib.py | 2 | 7875 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas._libs import lib, writers as libwriters
from pandas import Index
import pandas.util.testing as tm
class TestMisc(object):
def test_max_len_string_array(self):
arr = a = np.array(['foo', 'b', np.nan], dtype='object')
assert libwriters.max_len_string_array(arr) == 3
# unicode
arr = a.astype('U').astype(object)
assert libwriters.max_len_string_array(arr) == 3
# bytes for python3
arr = a.astype('S').astype(object)
assert libwriters.max_len_string_array(arr) == 3
# raises
with pytest.raises(TypeError):
libwriters.max_len_string_array(arr.astype('U'))
def test_fast_unique_multiple_list_gen_sort(self):
keys = [['p', 'a'], ['n', 'd'], ['a', 's']]
gen = (key for key in keys)
expected = np.array(['a', 'd', 'n', 'p', 's'])
out = lib.fast_unique_multiple_list_gen(gen, sort=True)
tm.assert_numpy_array_equal(np.array(out), expected)
gen = (key for key in keys)
expected = np.array(['p', 'a', 'n', 'd', 's'])
out = lib.fast_unique_multiple_list_gen(gen, sort=False)
tm.assert_numpy_array_equal(np.array(out), expected)
class TestIndexing(object):
def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
indices = np.array([], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
indices = np.arange(0, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2],
[2, 0, -2]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_right_edge(self):
target = np.arange(100)
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
indices = np.arange(start, 99, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
indices = np.array([97, 98, 99, 100], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError):
target[indices]
with pytest.raises(IndexError):
target[maybe_slice]
indices = np.array([100, 99, 98, 97], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError):
target[indices]
with pytest.raises(IndexError):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_both_edges(self):
target = np.arange(10)
# slice
for step in [1, 2, 4, 5, 8, 9]:
indices = np.arange(0, 9, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_middle(self):
target = np.arange(100)
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
indices = np.arange(start, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_booleans_to_slice(self):
arr = np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.uint8)
result = lib.maybe_booleans_to_slice(arr)
assert result.dtype == np.bool_
result = lib.maybe_booleans_to_slice(arr[:0])
assert result == slice(0, 0)
def test_get_reverse_indexer(self):
indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64)
result = lib.get_reverse_indexer(indexer, 5)
expected = np.array([4, 2, 3, 6, 7], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_cache_readonly_preserve_docstrings():
# GH18197
assert Index.hasnans.__doc__ is not None
| bsd-3-clause |
bafnalab/CLEAR | CLEAR.py | 1 | 13998 | '''
Copyleft Oct 27, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: airanmehr@gmail.com
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import pylab as plt
import matplotlib as mpl
from numba import guvectorize,vectorize
def processSyncFileLine(x,dialellic=True):
z = x.apply(lambda xx: pd.Series(xx.split(':'), index=['A', 'T', 'C', 'G', 'N', 'del'])).astype(float).iloc[:, :4]
ref = x.name[-1]
alt = z.sum().sort_values()[-2:]
alt = alt[(alt.index != ref)].index[0]
    if dialellic: ## Alternate allele is everything except reference
return pd.concat([z[ref].astype(int).rename('C'), (z.sum(1)).rename('D')], axis=1).stack()
else: ## Alternate allele is the allele with the most reads
return pd.concat([z[ref].astype(int).rename('C'), (z[ref] + z[alt]).rename('D')], axis=1).stack()
def loadSync(fname = './sample_data/popoolation2/F37.sync'):
print 'loading',fname
cols=pd.read_csv(fname.replace('.sync','.pops'), sep='\t', header=None, comment='#').iloc[0].apply(lambda x: map(int,x.split(','))).tolist()
data=pd.read_csv(fname, sep='\t', header=None).set_index(range(3))
data.columns=pd.MultiIndex.from_tuples(cols)
data.index.names= ['CHROM', 'POS', 'REF']
data=data.sort_index().reorder_levels([1,0],axis=1).sort_index(axis=1)
data=data.apply(processSyncFileLine,axis=1)
data.columns.names=['REP','GEN','READ']
data=changeCtoAlternateAndDampZeroReads(data)
data.index=data.index.droplevel('REF')
return data
def changeCtoAlternateAndDampZeroReads(a):
C = a.xs('C', level=2, axis=1).sort_index().sort_index(axis=1)
D = a.xs('D', level=2, axis=1).sort_index().sort_index(axis=1)
C = D - C
if (D == 0).sum().sum():
C[D == 0] += 1
D[D == 0] += 2
C.columns = pd.MultiIndex.from_tuples([x + ('C',) for x in C.columns], names=C.columns.names + ['READ'])
D.columns = pd.MultiIndex.from_tuples([x + ('D',) for x in D.columns], names=D.columns.names + ['READ'])
return pd.concat([C, D], axis=1).sort_index(axis=1).sort_index()
def power_recursive(T, n, powers_cached):
    """Compute the matrix power T**n by exponentiation-by-squaring, memoising intermediate powers in powers_cached."""
    if n not in powers_cached.index:
if n % 2 == 0:
TT = power_recursive(T, n / 2,powers_cached)
powers_cached[n]= TT.dot(TT)
else:
powers_cached[n]= T .dot( power_recursive(T, n - 1,powers_cached))
return powers_cached[n]
def computePowers(T,powers):
powers_cached =pd.Series([np.eye(T.shape[0]),T],index=[0,1])
for n in powers:
power_recursive(T, n, powers_cached)
return powers_cached.loc[powers]
def precomputeTransition(data, sh,N ):
s,h=sh
print 'Precomputing transitions for s={:.3f} h={:2f}'.format(s,h)
powers=np.unique(np.concatenate(Powers(data.xs('D',level='READ',axis=1)).tolist()))
T = Markov.computeTransition(s, N, h=h, takeLog=False)
Tn=computePowers(T,powers)
return Tn
Powers=lambda CD:CD.groupby(level=0,axis=1).apply(lambda x: pd.Series(x[x.name].columns).diff().values[1:].astype(int))  # per-replicate generation gaps, i.e. the required transition-matrix powers
def HMMlik(E, Ts, CD, s):
T = Ts.loc[s]
powers=Powers(CD)
likes = pd.Series(0, index=CD.index)
for rep, df in CD.T.groupby(level=0):
alpha = E.iloc[df.loc[(rep, 0)]].values
for step, power in zip(range(1, df.shape[0]), powers[rep]):
alpha = alpha.dot(T.loc[power].values) * E.values[df.loc[rep].iloc[step].values]
likes += vectorizedLog(alpha.mean(1))
return likes
def findML(init, gridS, cd, E, T, eps):
    # Vectorised grid search over s: advance along gridS and freeze each variant's
    # estimate once its likelihood stops improving by more than eps.
i = pd.Series(True, index=init.index).values;
mlprev = init.values.copy(True);
mlcurrent = init.values.copy(True)
mle = np.ones(mlcurrent.size) * gridS[0];
ml = init.values.copy(True)
for s in gridS[1:]:
mlprev[i] = mlcurrent[i]
mlcurrent[i] = HMMlik(E, T, cd[i], s)
i = mlcurrent > mlprev + eps
if i.sum() == 0: break
mle[i] = s
ml[i] = mlcurrent[i]
return pd.DataFrame([ml, mle], index=['alt', 's'], columns=cd.index).T
def HMM(CD, E, Ts,null_s_th=None,eps=1e-1):
print 'Fitting HMM to {} variants.'.format(CD.shape[0])
if null_s_th is None:
gridS=Ts.index.values
ss=np.sort(Ts.index.values)
null_s_th=np.abs(ss[ss!=0]).min()
    # likelihood under neutrality (s=0) and at the smallest non-zero |s| on the grid
    likes_null = HMMlik(E, Ts, CD, 0);
likes_null.name = 'null'
likes_thn = HMMlik(E, Ts, CD, -null_s_th)
likes_thp = HMMlik(E, Ts, CD[likes_null > likes_thn], null_s_th)
    # partition variants by the sign of s that improves the likelihood over neutrality
    neg = likes_thn[likes_null < likes_thn]
zero = likes_null.loc[(likes_null.loc[likes_thp.index] >= likes_thp).replace({False: None}).dropna().index];
pos = likes_thp.loc[(likes_null.loc[likes_thp.index] < likes_thp).replace({False: None}).dropna().index];
dfz = pd.DataFrame(zero.values, index=zero.index, columns=['alt']);
dfz['s'] = 0
dfp = findML(pos, gridS[gridS>=null_s_th], CD.loc[pos.index], E, Ts, eps)
dfn = findML(neg, gridS[gridS<=-null_s_th][::-1], CD.loc[neg.index], E, Ts, eps)
df = pd.concat([dfp, dfz, dfn])
df = pd.concat([df, likes_null], axis=1)
return df
import scipy.misc as sc
def getStateLikelihoods(cd, nu):
    # binomial emission probability P(c reference reads | depth d, allele frequency nu)
    c, d = cd
    p = sc.comb(d, c) * (nu ** c) * ((1 - nu) ** (d - c))
    return p
def precomputeCDandEmissions(data):
"""
0- reads C read counts of reference and D counts of depth
1- computes alternate allele reads based on reference and depth
2- saves CD
3- saves state conditional distributions P(nu|(c,d)) aka emissions
"""
print 'Precomputing CD (C,D)=(Derived count,total Count) and corresponding emission probabilities...'
nu = pd.Series(np.arange(0, 1.00001, 0.001), index=np.arange(0, 1.00001, 0.001))
c = data.xs('C', level='READ', axis=1)
d = data.xs('D', level='READ', axis=1)
cd = pd.concat([pd.Series(zip(c[i], d[i])) for i in c.columns], axis=1);
cd.columns = c.columns;
cd.index = c.index
allreads = pd.Series(cd.values.reshape(-1)).unique();
allreads = pd.Series(allreads, index=pd.MultiIndex.from_tuples(allreads, names=['c', 'd'])).sort_index()
emissions = allreads.apply(lambda x: getStateLikelihoods(x, nu)).sort_index()
index = pd.Series(range(emissions.shape[0]), emissions.index)
CDEidx = cd.applymap(lambda x: index.loc[x])
return emissions,CDEidx
def precomputeTransitions(data,rangeS=np.arange(-0.5,0.5001,0.1),rangeH=[0.5],N=500):
SS,HH=np.meshgrid(np.round(rangeS,3),np.round(rangeH,3))
SH=zip(SS.reshape(-1), HH.reshape(-1))
return pd.Series(map(lambda sh: precomputeTransition(data,sh,N),SH),index=pd.MultiIndex.from_tuples(SH,names=['s','h'])).xs(0.5,level='h')
def Manhattan(data, columns=None, names=None, fname=None, colors=['black', 'gray'], markerSize=3, ylim=None, show=True,
std_th=None, top_k=None, cutoff=None, common=None, Outliers=None, shade=None, fig=None, ticksize=4,
sortedAlready=False):
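    # Draws one Manhattan-style panel per requested column: positions are
    # mapped onto a cumulative genome-wide coordinate so chromosomes are laid
    # end to end with alternating colors, with optional outlier highlighting
    # (cutoff / top_k / std_th), shaded regions and common-variant markers.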
def reset_index(x):
if x is None: return None
if 'CHROM' not in x.columns:
return x.reset_index()
else:
return x
if type(data) == pd.Series:
DF = pd.DataFrame(data)
else:
DF = data
if columns is None: columns=DF.columns
if names is None:names=columns
df = reset_index(DF)
Outliers = reset_index(Outliers)
if not sortedAlready: df = df.sort_index()
if not show:
plt.ioff()
from itertools import cycle
def addGlobalPOSIndex(df,chroms):
if df is not None:
df['gpos'] = df.POS + chroms.offset.loc[df.CHROM].values
df.set_index('gpos', inplace=True);
df.sort_index(inplace=True)
def plotOne(b, d, name, chroms,common,shade):
a = b.dropna()
c = d.loc[a.index]
if shade is not None:
for _ , row in shade.iterrows():
plt.gca().fill_between([row.gstart, row.gend], a.min(), a.max(), color='b', alpha=0.3)
plt.scatter(a.index, a, s=markerSize, c=c, alpha=0.8, edgecolors='none')
outliers=None
if Outliers is not None:
outliers=Outliers[name].dropna()
if cutoff is not None:
outliers = a[a >= cutoff[name]]
elif top_k is not None:
outliers = a.sort_values(ascending=False).iloc[:top_k]
elif std_th is not None:
outliers = a[a > a.mean() + std_th * a.std()]
if outliers is not None:
if len(outliers):
# if name != 'Number of SNPs':
plt.scatter(outliers.index, outliers, s=markerSize, c='r', alpha=0.8, edgecolors='none')
# plt.axhline(outliers.min(), color='k', ls='--')
if common is not None:
for ii in common.index: plt.axvline(ii,c='g',alpha=0.5)
plt.axis('tight');
plt.xlim(0, a.index[-1]);
plt.ylabel(name, fontsize=ticksize * 1.5)
# plt.title('{} SNPs, {} are red.'.format(a.dropna().shape[0], outliers.shape[0]))
plt.xticks([x for x in chroms.mid], [str(x) for x in chroms.index], rotation=-90, fontsize=ticksize * 1.5)
plt.setp(plt.gca().get_xticklabels(), visible=False)
plt.locator_params(axis='y', nbins=3)
mpl.rc('ytick', labelsize=ticksize)
if ylim is not None: plt.ylim(ymin=ylim)
chroms = pd.DataFrame(df.groupby('CHROM').POS.max().rename('len').loc[df.reset_index().CHROM.unique()] + 1000)
chroms['offset'] = np.append([0], chroms.len.cumsum().iloc[:-1].values)
chroms['color'] = [c for (_, c) in zip(range(chroms.shape[0]), cycle(colors))]
chroms['mid'] = [x + y / 2 for x, y in zip(chroms.offset, chroms.len)]
df['color'] = chroms.color.loc[df.CHROM].values
df['gpos'] = df.POS + chroms.offset.loc[df.CHROM].values
df['color'] = chroms.color.loc[df.CHROM].values
df.set_index('gpos', inplace=True);
if shade is not None:
shade['gstart']=shade.start + chroms.offset.loc[shade.CHROM].values
shade['gend']=shade.end + chroms.offset.loc[shade.CHROM].values
addGlobalPOSIndex(common, chroms);
addGlobalPOSIndex(Outliers, chroms)
if fig is None:
fig = plt.figure(figsize=(7, 1.5*columns.size), dpi=300);
for i in range(columns.size):
plt.subplot(columns.size, 1, i+1);
plotOne(df[columns[i]], df.color, names[i], chroms,common, shade)
plt.setp(plt.gca().get_xticklabels(), visible=True)
plt.xlabel('Chromosome', size=ticksize * 1.5)
if fname is not None:
print 'saving ', fname
plt.savefig(fname)
if not show:
plt.ion()
plt.gcf().subplots_adjust(bottom=0.25)
# sns.set_style("whitegrid", {"grid.color": "1", 'axes.linewidth': .5, "grid.linewidth": ".09"})
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': ticksize});
mpl.rc('text', usetex=True)
plt.show()
return fig
class Markov:
@staticmethod
def computePower(T,n,takeLog=False):
Tn=T.copy(True)
for i in range(n-1):
Tn=Tn.dot(T)
if takeLog:
return Tn.applymap(np.log)
else:
return Tn
@staticmethod
def computeTransition(s, N, h=0.5, takeLog=False):
# s,h=-0.5,0.5;N=500
nu0=np.arange(2*N+1)/float(2*N)
nu_t = map(lambda x: max(min(fx(x, s, h=h), 1.), 0.), nu0)
nu_t[0]+=1e-15;nu_t[-1]-=1e-15
return pd.DataFrame(computeTransition(nu_t),index=nu0,columns=nu0)
@staticmethod
def computeProb(X,T):
return sum([np.log(T.loc[X[t,r],X[t+1,r]]) for t in range(X.shape[0]-1) for r in range(X.shape[1])])
def fx(x, s=0.0, h=0.5): return ((1 + s) * x ** 2 + (1 + h * s) * x * (1 - x)) / (
(1 + s) * x ** 2 + 2 * (1 + h * s) * x * (1 - x) + (1 - x) ** 2)
@guvectorize(['void(float64[:], float64[:,:])'],'(n)->(n,n)')
def computeTransition(nu_t,T):
N = (nu_t.shape[0] - 1) / 2
logrange= np.log(np.arange(1,2*N+1))
lograngesum=logrange.sum()
lognu_t=np.log(nu_t)
lognu_tbar=np.log(1-nu_t)
for i in range(T.shape[0]):
for j in range(T.shape[0]):
T[i,j]= np.exp(lograngesum - logrange[:j].sum() - logrange[:2*N-j].sum()+ lognu_t[i]*j + lognu_tbar[i]*(2.*N-j))
if not nu_t[i]: T[i, 0] = 1;
if nu_t[i] == 1: T[i, -1] = 1;
@vectorize
def vectorizedLog(x):
return float(np.log(x))
def scanGenome(genome, f, winSize=50000, step=10000, minVariants=None):
"""
Args:
genome: scans genome, a series which CHROM and POS are its indices
windowSize:
step:
Returns:
"""
res=[]
if minVariants is not None: f.update({'COUNT': lambda x: x.size})
for chrname,chrom in genome.groupby(level='CHROM'):
df=pd.DataFrame(scanChromosome(chrom,f,winSize,step))
df['CHROM']=chrname;df.set_index('CHROM', append=True, inplace=True);df.index=df.index.swaplevel(0, 1)
res+=[df]
df = pd.concat(res)
if minVariants is not None:
df[df.COUNT < minVariants] = None
df = df.loc[:, df.columns != 'COUNT'].dropna()
return df
def scanChromosome(x,f,winSize,step):
"""
Args:
chrom: dataframe containing chromosome, positions are index and the index name should be set
windowSize: winsize
step: steps in sliding widnow
f: is a function or dict of fucntions e.g. f= {'Mean' : np.mean, 'Max' : np.max, 'Custom' : np.min}
Returns:
"""
POS=x.index.get_level_values('POS')
res=[]
    def roundto(x, base=50000):
        return int(base * np.round(float(x) / base))
Bins=np.arange(max(0,roundto(POS.min()-winSize,base=step)), roundto(POS.max(),base=step),winSize)
for i in range(int(winSize/step)):
bins=i*step +Bins
windows=pd.cut( POS, bins,labels=(bins[:-1] + winSize/2).astype(int))
res+=[x.groupby(windows).agg(f)]
res[-1].index= res[-1].index.astype(int);res[-1].index.name='POS'
return pd.concat(res).sort_index().dropna()
| mit |
avuan/PyMPA37 | main.pympa.dir/pympa.py | 1 | 33384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2016/08/23 Version 34 - parameters24 input file needed
# 2017/10/27 Version 39 - Reformatted PEP8 Code
# 2017/11/05 Version 40 - Corrections to tdifmin, tstda calculations
# 2019/10/15 Version pympa - xcorr substituted with correlate_template from obspy
# First Version August 2014 - Last October 2017 (author: Alessandro Vuan)
# Code for the detection of microseismicity based on cross correlation
# of template events. The code exploits multiple cores to speed up processing time.
#
# Method's references:
# The code is developed and maintained at
# Istituto Nazionale di Oceanografia e Geofisica di Trieste (OGS)
# and was inspired by collaborating with Aitaro Kato and colleagues at ERI.
# Kato A, Obara K, Igarashi T, Tsuruoka H, Nakagawa S, Hirata N (2012)
# Propagation of slow slip leading up to the 2011 Mw 9.0 Tohoku-Oki
# earthquake. Science doi:10.1126/science.1215141
#
# For questions, comments and suggestions please send an email to avuan@inogs.it
# The kernel function xcorr, adapted from Austin Holland, is modified in pympa.
# ObsPy v1.2.0 is recommended, substituting the xcorr function with
# correlate_template.
# Software Requirements: the following dependencies are needed (check import
# and from statements below)
# Python "obspy" package installed via Anaconda with all numpy and scipy
# packages
# Python "math" libraries
# Python "bottleneck" utilities to speed up numpy array operations
#
# import useful libraries
import os
import os.path
import datetime
from math import log10
from time import perf_counter
import bottleneck as bn
import numpy as np
import pandas as pd
from obspy import read, Stream, Trace
from obspy.core import UTCDateTime
from obspy.core.event import read_events
from obspy.signal.trigger import coincidence_trigger
# from obspy.signal.cross_correlation import correlate_template
# LIST OF USEFUL FUNCTIONS
def listdays(year, month, day, period):
# create a list of days for scanning by templates
datelist = pd.date_range(datetime.datetime(year, month, day), periods=period).tolist()
a = list(map(pd.Timestamp.to_pydatetime, datelist))
days = []
for i in a:
days.append(i.strftime("%y%m%d"))
return days
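    # e.g. listdays(2016, 8, 1, 3) -> ['160801', '160802', '160803']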
def read_parameters(par):
# read 'parameters24' file to setup useful variables
with open(par) as fp:
data = fp.read().splitlines()
stations = data[23].split(" ")
print(stations)
channels = data[24].split(" ")
print(channels)
networks = data[25].split(" ")
print(networks)
lowpassf = float(data[26])
highpassf = float(data[27])
sample_tol = int(data[28])
cc_threshold = float(data[29])
nch_min = int(data[30])
temp_length = float(data[31])
utc_prec = int(data[32])
cont_dir = "./" + data[33] + "/"
temp_dir = "./" + data[34] + "/"
travel_dir = "./" + data[35] + "/"
dateperiod = data[36].split(" ")
ev_catalog = str(data[37])
start_itemp = int(data[38])
print("starting template = ", start_itemp)
stop_itemp = int(data[39])
print("ending template = ", stop_itemp)
factor_thre = int(data[40])
stdup = float(data[41])
stddown = float(data[42])
chan_max = int(data[43])
nchunk = int(data[44])
return (
stations,
channels,
networks,
lowpassf,
highpassf,
sample_tol,
cc_threshold,
nch_min,
temp_length,
utc_prec,
cont_dir,
temp_dir,
travel_dir,
dateperiod,
ev_catalog,
start_itemp,
stop_itemp,
factor_thre,
stdup,
stddown,
chan_max,
nchunk,
)
def trim_fill(tc, t1, t2):
tc.trim(starttime=t1, endtime=t2, pad=True, fill_value=0)
return tc
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
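# rolling_window returns a strided (len(x) - m + 1, m) view of x (no copy), so
# xcorr below evaluates the normalized cross-correlation of the template y at
# every lag of the continuous trace x in a single vectorized pass.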
def xcorr(x, y):
n = len(x)
m = len(y)
meany = np.nanmean(y)
stdy = np.nanstd(np.asarray(y))
tmp = rolling_window(x, m)
with np.errstate(divide="ignore"):
c = bn.nansum(
(y - meany) * (tmp - np.reshape(bn.nanmean(tmp, -1), (n - m + 1, 1))), -1
) / (m * bn.nanstd(tmp, -1) * stdy)
c[m * bn.nanstd(tmp, -1) * stdy == 0] = 0
return c
def process_input(itemp, nn, ss, ich, stream_df):
st_cft = Stream()
# itemp = template number, nn = network code, ss = station code,
# ich = channel code, stream_df = Stream() object as defined in obspy
# library
temp_file = "%s.%s.%s..%s.mseed" % (str(itemp), nn, ss, ich)
finpt = "%s%s" % (temp_dir, temp_file)
if os.path.isfile(finpt):
try:
tsize = os.path.getsize(finpt)
if tsize > 0:
# print "ok template exist and not empty"
st_temp = Stream()
st_temp = read(finpt)
tt = st_temp[0]
# continuous data are stored in stream_df
sc = stream_df.select(station=ss, channel=ich)
if sc.__nonzero__():
tc = sc[0]
fct = xcorr(tc.data, tt.data)
# fct = correlate_template(tc.data, tt.data)
stats = {
"network": tc.stats.network,
"station": tc.stats.station,
"location": "",
"channel": tc.stats.channel,
"starttime": tc.stats.starttime,
"npts": len(fct),
"sampling_rate": tc.stats.sampling_rate,
"mseed": {"dataquality": "D"},
}
trnew = Trace(data=fct, header=stats)
tc = trnew.copy()
st_cft = Stream(traces=[tc])
else:
print("warning no stream is found")
else:
print("warning template event is empty")
except OSError:
pass
return st_cft
def quality_cft(trac):
std_trac = np.nanstd(abs(trac.data))
return std_trac
def stack(stall, df, tstart, npts, stdup, stddown, nch_min):
std_trac = np.empty(len(stall))
td = np.empty(len(stall))
"""
Function to stack traces in a stream with different trace.id and
different starttime but the same number of datapoints.
Returns a trace having as starttime
    the earliest starttime within the stream
"""
for itr, tr in enumerate(stall):
std_trac[itr] = quality_cft(tr)
avestd = np.nanmean(std_trac[0:])
avestdup = avestd * stdup
avestddw = avestd * stddown
for jtr, tr in enumerate(stall):
if std_trac[jtr] >= avestdup or std_trac[jtr] <= avestddw:
stall.remove(tr)
print("removed Trace n Stream = ...", tr, std_trac[jtr], avestd)
td[jtr] = 99.99
# print(td[jtr])
else:
sta = tr.stats.station
chan = tr.stats.channel
net = tr.stats.network
s = "%s.%s.%s" % (net, sta, chan)
td[jtr] = float(d[s])
# print(td[jtr])
itr = len(stall)
print("itr == ", itr)
if itr >= nch_min:
tdifmin = min(td)
tdat = np.nansum([tr.data for tr in stall], axis=0) / itr
sta = "STACK"
cha = "BH"
net = "XX"
header = {
"network": net,
"station": sta,
"channel": cha,
"starttime": tstart,
"sampling_rate": df,
"npts": npts,
}
tt = Trace(data=tdat, header=header)
else:
tdifmin = None
sta = "STACK"
cha = "BH"
net = "XX"
header = {
"network": net,
"station": sta,
"channel": cha,
"starttime": tstart,
"sampling_rate": df,
"npts": npts,
}
tt = Trace(data=np.zeros(npts), header=header)
return tt, tdifmin
def csc(
stall, stcc, trg, tstda, sample_tol, cc_threshold, nch_min, day, itemp, itrig, f1
):
"""
    The function csc (check_singlechannelcft) computes the maximum CFT
    values at each trigger time and counts the number of channels
    having a cross-correlation higher than the single-channel threshold.
    nch, cft_ave, crt are re-evaluated on the basis of a
    +/- 2 sample approximation. Statistics are written in stat files.
"""
    # important parameters: a sample_tolerance lower than 2 often results
    # in wrong magnitudes
sample_tolerance = sample_tol
single_channelcft = cc_threshold
#
trigger_time = trg["time"]
tcft = stcc[0]
t0_tcft = tcft.stats.starttime
trigger_shift = trigger_time.timestamp - t0_tcft.timestamp
trigger_sample = int(round(trigger_shift / tcft.stats.delta))
max_sct = np.empty(len(stall))
max_trg = np.empty(len(stall))
max_ind = np.empty(len(stall))
chan_sct = np.chararray(len(stall), 12)
nch = 0
for icft, tsc in enumerate(stall):
# get cft amplitude value at corresponding trigger and store it in
# check for possible 2 sample shift and eventually change
# trg['cft_peaks']
chan_sct[icft] = (
tsc.stats.network + "." + tsc.stats.station + " " + tsc.stats.channel
)
tmp0 = trigger_sample - sample_tolerance
if tmp0 < 0:
tmp0 = 0
tmp1 = trigger_sample + sample_tolerance + 1
max_sct[icft] = max(tsc.data[tmp0:tmp1])
max_ind[icft] = np.nanargmax(tsc.data[tmp0:tmp1])
max_ind[icft] = sample_tolerance - max_ind[icft]
max_trg[icft] = tsc.data[trigger_sample : trigger_sample + 1]
nch = (max_sct > single_channelcft).sum()
if nch >= nch_min:
nch09 = (max_sct > 0.9).sum()
nch07 = (max_sct > 0.7).sum()
nch05 = (max_sct > 0.5).sum()
nch03 = (max_sct > 0.3).sum()
# print("nch ==", nch03, nch05, nch07, nch09)
cft_ave = np.nanmean(max_sct[:])
crt = cft_ave / tstda
cft_ave_trg = np.nanmean(max_trg[:])
crt_trg = cft_ave_trg / tstda
max_sct = max_sct.T
max_trg = max_trg.T
chan_sct = chan_sct.T
# str11 = "%s %s %s %s %s %s %s %s %s %s %s %s %s \n" %
# (day[0:6], str(itemp), str(itrig),
# trigger_time, tstda, cft_ave, crt, cft_ave_trg,
# crt_trg, nch03, nch05, nch07, nch09)
# str11 = "%s %s %s %s %s %s %s %s \n" % ( nch03, nch04, nch05,
# nch06, nch07, nch08, cft_ave, crt )
# f1.write(str11)
for idchan in range(0, len(max_sct)):
str22 = "%s %s %s %s \n" % (
chan_sct[idchan].decode(),
max_trg[idchan],
max_sct[idchan],
max_ind[idchan],
)
f1.write(str22)
else:
nch = 1
cft_ave = 1
crt = 1
cft_ave_trg = 1
crt_trg = 1
nch03 = 1
nch05 = 1
nch07 = 1
nch09 = 1
return nch, cft_ave, crt, cft_ave_trg, crt_trg, nch03, nch05, nch07, nch09
def mag_detect(magt, amaxt, amaxd):
"""
mag_detect(mag_temp,amax_temp,amax_detect)
Returns the magnitude of the new detection by using the template/detection
amplitude trace ratio
and the magnitude of the template event
"""
amaxr = amaxt / amaxd
magd = magt - log10(amaxr)
return magd
def reject_moutliers(data, m=1.0):
nonzeroind = np.nonzero(data)[0]
nzlen = len(nonzeroind)
# print("nonzeroind ==", nonzeroind)
data = data[nonzeroind]
# print("data ==", data)
datamed = np.nanmedian(data)
# print("datamed ==", datamed)
d = np.abs(data - datamed)
mdev = 2 * np.median(d)
# print("d, mdev ==", d, mdev)
if mdev == 0:
inds = np.arange(nzlen)
# print("inds ==", inds)
data[inds] = datamed
else:
s = d / mdev
inds = np.where(s <= m)
# print("inds ==", inds)
return data[inds]
def mad(dmad):
    # estimate the daily noise level: mean absolute deviation about the
    # median of the non-zero CFT values (a MAD-like statistic)
ccm = dmad[dmad != 0]
med_val = np.nanmedian(ccm)
tstda = np.nansum(abs(ccm - med_val) / len(ccm))
return tstda
start_time = perf_counter()
# read 'parameters24' file to setup useful variables
[
stations,
channels,
networks,
lowpassf,
highpassf,
sample_tol,
cc_threshold,
nch_min,
temp_length,
utc_prec,
cont_dir,
temp_dir,
travel_dir,
dateperiod,
ev_catalog,
start_itemp,
stop_itemp,
factor_thre,
stdup,
stddown,
chan_max,
nchunk,
] = read_parameters("parameters24")
# set time precision for UTCDATETIME
UTCDateTime.DEFAULT_PRECISION = utc_prec
# read Catalog of Templates Events
cat = read_events(ev_catalog, format="ZMAP")
ncat = len(cat)
# read template from standard input
# startTemplate = input("INPUT: Enter Starting template ")
# stopTemplate = input("INPUT: Enter Ending template ")
# print("OUTPUT: Running from template", startTemplate, " to ", stopTemplate)
t_start = start_itemp
t_stop = stop_itemp
# loop over days
# generate list of days "
year = int(dateperiod[0])
month = int(dateperiod[1])
day = int(dateperiod[2])
period = int(dateperiod[3])
days = listdays(year, month, day, period)
"""
initialise stt as a stream of templates
and stream_df as a stream of continuous waveforms
"""
stt = Stream()
stream_df = Stream()
stream_cft = Stream()
stall = Stream()
ccmad = Trace()
for day in days:
# settings to cut exactly 24 hours file from without including
# previous/next day
iday = "%s" % (day[4:6])
imonth = "%s" % (day[2:4])
print("imonth ==", imonth)
iyear = "20%s" % (day[0:2])
iiyear = int(iyear)
print(iyear, imonth, iday)
iimonth = int(imonth)
iiday = int(iday)
iihour = 23
iimin = 59
iisec = 0
for itemp in range(t_start, t_stop):
stt.clear()
# open file containing detections
fout = "%s.%s.cat" % (str(itemp), day[0:6])
f = open(fout, "w+")
print("itemp == ...", str(itemp))
# open statistics file for each detection
fout1 = "%s.%s.stats" % (str(itemp), day[0:6])
f1 = open(fout1, "w+")
# open file including magnitude information
fout2 = "%s.%s.stats.mag" % (str(itemp), day[0:6])
f2 = open(fout2, "w+")
# open file listing exceptions
fout3 = "%s.%s.except" % (str(itemp), day[0:6])
f3 = open(fout3, "w+")
ot = cat[itemp].origins[0].time
mt = cat[itemp].magnitudes[0].mag
lon = cat[itemp].origins[0].longitude
lat = cat[itemp].origins[0].latitude
dep = cat[itemp].origins[0].depth
# read ttimes, select the num_ttimes (parameters,
# last line) channels
# and read only these templates
travel_file = "%s%s.ttimes" % (travel_dir, str(itemp))
with open(travel_file, "r") as ttim:
d = dict(x.rstrip().split(None, 1) for x in ttim)
ttim.close()
s = d.items()
v = sorted(s, key=lambda x: (float(x[1])))[0:chan_max]
vv = [x[0] for x in v]
for vvc in vv:
n_net = vvc.split(".")[0]
n_sta = vvc.split(".")[1]
n_chn = vvc.split(".")[2]
filename = "%s%s.%s.%s..%s.mseed" % (
temp_dir,
str(itemp),
str(n_net),
str(n_sta),
str(n_chn),
)
print(filename)
stt += read(filename)
if len(stt) >= nch_min:
tc = Trace()
bandpass = [lowpassf, highpassf]
chunks = []
h24 = 86400
chunk_start = UTCDateTime(iiyear, iimonth, iiday)
end_time = chunk_start + h24
while chunk_start < end_time:
chunk_end = chunk_start + h24 / nchunk
if chunk_end > end_time:
chunk_end = end_time
chunks.append((chunk_start, chunk_end))
chunk_start += h24 / nchunk
for t1, t2 in chunks:
print(t1, t2)
stream_df.clear()
for tr in stt:
finpc1 = "%s%s.%s.%s" % (
cont_dir,
str(day),
str(tr.stats.station),
str(tr.stats.channel),
)
if os.path.exists(finpc1) and os.path.getsize(finpc1) > 0:
try:
st = read(finpc1, starttime=t1, endtime=t2)
if len(st) != 0:
st.merge(method=1, fill_value=0)
tc = st[0]
stat = tc.stats.station
chan = tc.stats.channel
tc.detrend("constant")
# 24h continuous trace starts 00 h 00 m 00.0s
trim_fill(tc, t1, t2)
tc.filter(
"bandpass",
freqmin=bandpass[0],
freqmax=bandpass[1],
zerophase=True,
)
# store detrended and filtered continuous data
# in a Stream
stream_df += Stream(traces=[tc])
except IOError:
pass
if len(stream_df) >= nch_min:
ntl = len(stt)
amaxat = np.empty(ntl)
# for each template event
# md=np.empty(ntl)
md = np.zeros(ntl)
damaxat = {}
# reference time to be used for
# retrieving time synchronization
reft = min([tr.stats.starttime for tr in stt])
for il, tr in enumerate(stt):
amaxat[il] = max(abs(tr.data))
sta_t = tr.stats.station
cha_t = tr.stats.channel
tid_t = "%s.%s" % (sta_t, cha_t)
damaxat[tid_t] = float(amaxat[il])
# define travel time file for each template
# for synchronizing CFTs are obtained
# running calcTT01.py
travel_file = "%s%s.ttimes" % (travel_dir, str(itemp))
# print("travel_file = ", travel_file)
# store ttimes info in a dictionary
with open(travel_file, "r") as ttim:
d = dict(x.rstrip().split(None, 1) for x in ttim)
ttim.close()
# print(d)
# find minimum time to recover origin time
time_values = [float(v) for v in d.values()]
min_time_value = min(time_values)
# print("min_time_value == ", min_time_value)
min_time_key = [k for k, v in d.items() if v == str(min_time_value)]
# print("key, mintime == ", min_time_key, min_time_value)
# clear global_variable
stream_cft.clear()
stcc = Stream()
for nn in networks:
for ss in stations:
for ich in channels:
stream_cft += process_input(
itemp, nn, ss, ich, stream_df
)
stall.clear()
stcc.clear()
stnew = Stream()
tr = Trace()
tc_cft = Trace()
tsnew = UTCDateTime()
# seconds in 24 hours
nfile = len(stream_cft)
tstart = np.empty(nfile)
tend = np.empty(nfile)
tdif = np.empty(nfile)
for idx, tc_cft in enumerate(stream_cft):
# get station name from trace
sta = tc_cft.stats.station
chan = tc_cft.stats.channel
net = tc_cft.stats.network
delta = tc_cft.stats.delta
npts = (h24 / nchunk) / delta
s = "%s.%s.%s" % (net, sta, chan)
tdif[idx] = float(d[s])
for idx, tc_cft in enumerate(stream_cft):
# get stream starttime
tstart[idx] = tc_cft.stats.starttime + tdif[idx]
# waveforms should have the same
# number of npts
# and should be synchronized to the
# S-wave travel time
secs = (h24 / nchunk) + 60
tend[idx] = tstart[idx] + secs
check_npts = (tend[idx] - tstart[idx]) / tc_cft.stats.delta
ts = UTCDateTime(tstart[idx], precision=utc_prec)
te = UTCDateTime(tend[idx], precision=utc_prec)
stall += tc_cft.trim(
starttime=ts,
endtime=te,
nearest_sample=True,
pad=True,
fill_value=0,
)
tstart = min([tr.stats.starttime for tr in stall])
df = stall[0].stats.sampling_rate
npts = stall[0].stats.npts
# compute mean cross correlation from the stack of
# CFTs (see stack function)
ccmad, tdifmin = stack(
stall, df, tstart, npts, stdup, stddown, nch_min
)
print("tdifmin == ", tdifmin)
if tdifmin is not None:
# compute mean absolute deviation of abs(ccmad)
tstda = mad(ccmad.data)
# define threshold as 9 times std and quality index
thresholdd = factor_thre * tstda
# Trace ccmad is stored in a Stream
stcc = Stream(traces=[ccmad])
# Run coincidence trigger on a single CC trace
# resulting from the CFTs stack
# essential threshold parameters
# Cross correlation thresholds
xcor_cut = thresholdd
thr_on = thresholdd
thr_off = thresholdd - 0.15 * thresholdd
thr_coincidence_sum = 1.0
similarity_thresholds = {"BH": thr_on}
trigger_type = None
triglist = coincidence_trigger(
trigger_type,
thr_on,
thr_off,
stcc,
thr_coincidence_sum,
trace_ids=None,
similarity_thresholds=similarity_thresholds,
delete_long_trigger=False,
trigger_off_extension=3.0,
details=True,
)
ntrig = len(triglist)
tt = np.empty(ntrig)
cs = np.empty(ntrig)
nch = np.empty(ntrig)
cft_ave = np.empty(ntrig)
crt = np.empty(ntrig)
cft_ave_trg = np.empty(ntrig)
crt_trg = np.empty(ntrig)
nch3 = np.empty(ntrig)
nch5 = np.empty(ntrig)
nch7 = np.empty(ntrig)
nch9 = np.empty(ntrig)
mm = np.empty(ntrig)
timex = UTCDateTime()
tdifmin = min(tdif[0:])
for itrig, trg in enumerate(triglist):
# tdifmin is computed for contributing channels
# within the stack function
#
if tdifmin == min_time_value:
tt[itrig] = trg["time"] + min_time_value
elif tdifmin != min_time_value:
diff_time = min_time_value - tdifmin
tt[itrig] = trg["time"] + diff_time + min_time_value
cs[itrig] = trg["coincidence_sum"]
cft_ave[itrig] = trg["cft_peak_wmean"]
crt[itrig] = trg["cft_peaks"][0] / tstda
# traceID = trg['trace_ids']
# check single channel CFT
[
nch[itrig],
cft_ave[itrig],
crt[itrig],
cft_ave_trg[itrig],
crt_trg[itrig],
nch3[itrig],
nch5[itrig],
nch7[itrig],
nch9[itrig],
] = csc(
stall,
stcc,
trg,
tstda,
sample_tol,
cc_threshold,
nch_min,
day,
itemp,
itrig,
f1,
)
if int(nch[itrig]) >= nch_min:
nn = len(stream_df)
# nn=len(stt)
amaxac = np.zeros(nn)
md = np.zeros(nn)
# for each trigger, detrended,
# and filtered continuous
# data channels are trimmed and
# amplitude useful to
# estimate magnitude is measured.
damaxac = {}
mchan = {}
timestart = UTCDateTime()
timex = UTCDateTime(tt[itrig])
for il, tc in enumerate(stream_df):
ss = tc.stats.station
ich = tc.stats.channel
netwk = tc.stats.network
if stt.select(
station=ss, channel=ich
).__nonzero__():
ttt = stt.select(station=ss, channel=ich)[0]
s = "%s.%s.%s" % (netwk, ss, ich)
# print " s ==", s
uts = UTCDateTime(ttt.stats.starttime).timestamp
utr = UTCDateTime(reft).timestamp
if tdifmin <= 0:
timestart = (
timex + abs(tdifmin) + (uts - utr)
)
elif tdifmin > 0:
timestart = (
timex - abs(tdifmin) + (uts - utr)
)
timend = timestart + temp_length
ta = tc.copy()
ta.trim(
starttime=timestart,
endtime=timend,
pad=True,
fill_value=0,
)
amaxac[il] = max(abs(ta.data))
tid_c = "%s.%s" % (ss, ich)
damaxac[tid_c] = float(amaxac[il])
dct = damaxac[tid_c]
dtt = damaxat[tid_c]
if dct != 0 and dtt != 0:
md[il] = mag_detect(
mt, damaxat[tid_c], damaxac[tid_c]
)
mchan[tid_c] = md[il]
str00 = "%s %s\n" % (tid_c, mchan[tid_c])
f2.write(str00)
mdr = reject_moutliers(md, 1)
mm[itrig] = round(np.mean(mdr), 2)
cft_ave[itrig] = round(cft_ave[itrig], 3)
crt[itrig] = round(crt[itrig], 3)
cft_ave_trg[itrig] = round(cft_ave_trg[itrig], 3)
crt_trg[itrig] = round(crt_trg[itrig], 3)
str33 = (
"%s %s %s %s %s %s %s %s %s "
"%s %s %s %s %s %s %s\n"
% (
day[0:6],
str(itemp),
str(itrig),
str(UTCDateTime(tt[itrig])),
str(mm[itrig]),
str(mt),
str(nch[itrig]),
str(tstda),
str(cft_ave[itrig]),
str(crt[itrig]),
str(cft_ave_trg[itrig]),
str(crt_trg[itrig]),
str(nch3[itrig]),
str(nch5[itrig]),
str(nch7[itrig]),
str(nch9[itrig]),
)
)
f1.write(str33)
f2.write(str33)
str1 = "%s %s %s %s %s %s %s %s\n" % (
str(itemp),
str(UTCDateTime(tt[itrig])),
str(mm[itrig]),
str(cft_ave[itrig]),
str(crt[itrig]),
str(cft_ave_trg[itrig]),
str(crt_trg[itrig]),
str(int(nch[itrig])),
)
f.write(str1)
else:
str_except2 = "%s %s %s %s %s\n" % (
day[0:6],
str(itemp),
str(t1),
str(t2),
" num. correlograms lower than nch_min",
)
f3.write(str_except2)
pass
else:
str_except1 = "%s %s %s %s %s\n" % (
day[0:6],
str(itemp),
str(t1),
str(t2),
" num. 24h channels lower than nch_min",
)
f3.write(str_except1)
pass
else:
str_except0 = "%s %s %s\n" % (
day[0:6],
str(itemp),
" num. templates lower than nch_min",
)
f3.write(str_except0)
pass
f1.close()
f2.close()
f3.close()
f.close()
print(" elapsed time ", perf_counter() - start_time, " seconds")
| gpl-3.0 |
pierre-chaville/automlk | automlk/graphs.py | 1 | 21305 | import logging
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
import seaborn.apionly as sns
import itertools
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from .config import METRIC_NULL
from .context import get_dataset_folder
log = logging.getLogger(__name__)
try:
from wordcloud import WordCloud
import_wordcloud = True
except:
import_wordcloud = False
TRANSPARENT = False
SNS_STYLE = "whitegrid"
def graph_histogram(dataset_id, col, is_categorical, values, part='train'):
"""
generate the histogram of column col of the dataset
:param dataset_id: dataset id
:param col: column name
:param is_categorical: is the column categorical
:param values: values of the column
:param part: set (train, test)
:return: None
"""
try:
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
plt.figure(figsize=(7, 7))
if is_categorical:
df = pd.DataFrame(values)
df.columns = ['y']
encoder = LabelEncoder()
df['y'] = encoder.fit_transform(df['y'])
values = df['y'].values
sns.distplot(values, kde=False)
x_labels = encoder.inverse_transform(list(range(max(values) + 1)))
plt.xticks(list(range(max(values) + 1)), x_labels, rotation=90)
else:
sns.distplot(values)
plt.title('distribution of %s (%s set)' % (col, part))
plt.xlabel('values')
plt.ylabel('frequencies')
__save_fig(dataset_id, '_hist_%s_%s' % (part, col), dark)
except:
log.error('error in graph_histogram with dataset_id %s' % dataset_id)
def graph_correl_features(dataset, df):
"""
generates the graph of correlated features (heatmap matrix)
:param dataset: dataset object
:param df: data (as a dataframe)
:return: None
"""
try:
# convert categorical to numerical
for col in dataset.cat_cols:
encoder = LabelEncoder()
df[col] = encoder.fit_transform(df[col].map(str))
# create correlation matrix with pandas
corr = df.corr()
# display heatmap
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
if dataset.n_cols > 50:
plt.figure(figsize=(10, 10))
elif dataset.n_cols > 20:
plt.figure(figsize=(8, 8))
elif dataset.n_cols > 10:
plt.figure(figsize=(7, 7))
else:
plt.figure(figsize=(6, 6))
                sns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True)
plt.title('correlation map of the features')
plt.xticks(rotation=90)
plt.yticks(rotation=0)
plt.savefig(get_dataset_folder(dataset.dataset_id) + '/graphs/_correl.png', transparent=TRANSPARENT)
__save_fig(dataset.dataset_id, '_correl', dark)
except:
log.error('error in graph_correl_features with dataset_id %s' % dataset.dataset_id)
def __get_best_scores(scores):
# returns the list of best scores over time
# we will generate a list of the best values values over time
best_scores = []
best = METRIC_NULL
for x in scores:
if x < best:
best = x
best_scores.append(x)
else:
best_scores.append(best)
return np.abs(best_scores)
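    # e.g. __get_best_scores([5, 7, 3, 4]) -> [5, 5, 3, 3]
    # (assuming METRIC_NULL is larger than any score in the list)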
def graph_history_search(dataset, df_search, best_models, level):
"""
creates a graph of the best scores along searches
:param dataset: dataset object
:param df_search: dataframe of the search history
:param best_models: selection within df_search with best models
:param level: model level (0: standard, 1: ensembles)
:return: None
"""
try:
if len(df_search[df_search.level == level]) < 1:
return
scores = df_search[df_search.level == level].sort_index().cv_mean.values
if dataset.best_is_min:
# positive scores (e.g loss or error: min is best)
y_lim1, y_lim2 = __standard_range(best_models.cv_mean.abs(), 0, 50)
y_lim1 -= (y_lim2 - y_lim1) * 0.1
else:
# negative scores (e.g. auc: max is best)
y_lim1, y_lim2 = __standard_range(best_models.cv_mean.abs(), 50, 100)
y_lim2 += (y_lim2 - y_lim1) * 0.1
best_scores = __get_best_scores(scores)
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
plt.figure(figsize=(6, 6))
plt.plot(list(range(len(best_scores))), best_scores)
plt.title('best score over time (level=%d)' % level)
plt.xlabel('total searches')
plt.ylabel('score')
plt.ylim(y_lim1, y_lim2)
__save_fig(dataset.dataset_id, '_history_' + str(level), dark)
        # we will generate a list of the max values per model
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
plt.figure(figsize=(6, 6))
for model_name in best_models.model_name.unique()[:5][::-1]:
# scores = np.sort(np.abs(df_search[df_search.model_name == model_name].cv_max.values))[::-1]
best_scores = __get_best_scores(df_search[df_search.model_name == model_name].cv_mean.values)
plt.plot(list(range(len(best_scores))), best_scores, label=model_name)
plt.title('best score for 5 best models (level=%d)' % level)
plt.xlabel('searches')
plt.ylabel('score')
plt.ylim(y_lim1, y_lim2)
if dataset.best_is_min:
plt.legend(loc=1)
else:
plt.legend(loc=4)
__save_fig(dataset.dataset_id, '_models_' + str(level), dark)
except:
log.error('error in graph_history_search with dataset_id %s' % dataset.dataset_id)
def graph_history_scan(dataset, df_search, best_models):
"""
creates a graph of scores with various percentages of data (20%, 40%, .., 100%) for the 5 best models
:param dataset: dataset object
:param df_search: dataframe of the search history
:param best_models: selection within df_search with best models
:return: None
"""
try:
df = df_search[df_search.model_name.isin(best_models)]
if len(df) < 1:
return
if dataset.best_is_min:
# positive scores (e.g loss or error: min is best)
y_lim1, y_lim2 = __standard_range(df.cv_mean.abs(), 0, 100)
y_lim1 -= (y_lim2 - y_lim1) * 0.1
else:
# negative scores (e.g. auc: max is best)
y_lim1, y_lim2 = __standard_range(df.cv_mean.abs(), 0, 100)
y_lim2 += (y_lim2 - y_lim1) * 0.1
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
plt.figure(figsize=(6, 6))
for model_name in best_models[::-1]:
scan_scores = df[df.model_name == model_name].sort_values(by='pct')
plt.plot(scan_scores.pct.values, np.abs(scan_scores.cv_mean.values), label=model_name)
plt.title('performance on data')
plt.xlabel('% data')
plt.ylabel('score')
plt.ylim(y_lim1, y_lim2)
if dataset.best_is_min:
plt.legend(loc=1)
else:
plt.legend(loc=4)
__save_fig(dataset.dataset_id, '_scan', dark)
except:
log.error('error in graph_history_scan with dataset_id %s' % dataset.dataset_id)
def graph_predict_regression(dataset, round_id, y, y_pred, part='eval'):
"""
generate a graph prediction versus actuals
:param dataset: dataset object
:param round_id: id of the round
:param y: actual values
:param y_pred: predicted values
:param part: part of the dataset
:return: None
"""
try:
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
plt.figure(figsize=(6, 6))
# plot a graph prediction versus actuals
df = pd.DataFrame([y, y_pred]).transpose()
df.columns = ['actuals', 'predict']
g = sns.jointplot(x='actuals', y='predict', data=df, kind="kde", size=6)
g.plot_joint(plt.scatter, s=5, alpha=0.8)
g.ax_joint.collections[0].set_alpha(0)
mn, mx = __standard_range(y, 1, 99)
plt.plot((mn, mx), (mn, mx), color='r', lw=0.7)
plt.xlim(mn, mx)
plt.ylim(mn, mx)
plt.title('%s' % part)
__save_fig(dataset.dataset_id, 'predict_%s_%s' % (part, round_id), dark)
except:
log.error('error in graph_predict_regression with dataset_id %s' % dataset.dataset_id)
def graph_predict_classification(dataset, round_id, y, y_pred, part='eval'):
"""
generate a confusion matrix
:param dataset: dataset object
:param round_id: id of the round
:param y: actual values
:param y_pred: predicted values
:param part: part of the dataset
:return: None
"""
try:
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
for normalize in [False, True]:
plt.figure(figsize=(8, 6))
# convert proba to classes
y_pred_class = np.argmax(y_pred, axis=1)
# plot a confusion matrix
cnf_matrix = confusion_matrix(y, y_pred_class)
np.set_printoptions(precision=2)
plt.imshow(cnf_matrix, interpolation='nearest', cmap=plt.cm.Blues)
plt.colorbar()
tick_marks = np.arange(dataset.y_n_classes)
plt.xticks(tick_marks, dataset.y_class_names, rotation=45)
plt.yticks(tick_marks, dataset.y_class_names)
fmt = '.2f' if normalize else 'd'
thresh = cnf_matrix.max() / 2.
for i, j in itertools.product(range(cnf_matrix.shape[0]), range(cnf_matrix.shape[1])):
plt.text(j, i, format(cnf_matrix[i, j], fmt),
horizontalalignment="center",
color="white" if cnf_matrix[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('confusion matrix (%s set)' % part)
name = "predict_norm" if normalize else "predict"
__save_fig(dataset.dataset_id, '%s_%s_%s' % (name, part, round_id), dark)
# save confusion matrix
__save_cnf_matrix(dataset.dataset_id, round_id, part, dataset.y_class_names, cnf_matrix)
except:
log.error('error in graph_predict_classification with dataset_id %s' % dataset.dataset_id)
def graph_histogram_regression(dataset, round_id, y, part='eval'):
"""
generate the histogram of predictions
:param dataset: dataset object
:param round_id: id of the round (model)
:param y: prediction values
:param part: set (eval / train set)
:return: None
"""
try:
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
plt.figure(figsize=(6, 6))
sns.distplot(y)
plt.title('histogram of predictions (%s set)' % part)
plt.xlabel('values')
plt.ylabel('frequencies')
__save_fig(dataset.dataset_id, 'hist_%s_%s' % (part, round_id), dark)
except:
log.error('error in graph_histogram_regression with dataset_id %s' % dataset.dataset_id)
def graph_histogram_classification(dataset, round_id, y, part='eval'):
"""
generate the histogram of predictions
:param dataset: dataset object
:param round_id: id of the round (model)
:param y: prediction values
:param part: set (eval / train set)
:return: None
"""
try:
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
plt.figure(figsize=(6, 6))
for i, name in enumerate(dataset.y_class_names):
sns.distplot(y[:, i], hist=False, label=name)
plt.title('histogram of probabilities (%s set)' % part)
plt.xlabel('values')
plt.ylabel('frequencies')
plt.legend()
__save_fig(dataset.dataset_id, 'hist_%s_%s' % (part, round_id), dark)
except:
log.error('error in graph_histogram_classification with dataset_id %s' % dataset.dataset_id)
def graph_regression_numerical(dataset_id, df, col, target):
"""
display a reg scatter plot graph of col in x axis and target in y axis
:param dataset_id: id of the dataset
:param df: dataframe, with col and target values
:param col: name of column
:param target: name of target column
:return:
"""
try:
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
g = sns.jointplot(x=col, y=target, data=df, kind="kde", size=7)
g.plot_joint(plt.scatter, s=5, alpha=0.7)
g.ax_joint.collections[0].set_alpha(0)
plt.xlim(__standard_range(df[col].values, 1, 99))
plt.ylim(__standard_range(df[target].values, 1, 99))
__save_fig(dataset_id, '_col_' + col, dark)
except:
log.error('error in graph_regression_numerical with dataset_id %s' % dataset_id)
def graph_regression_categorical(dataset_id, df, col, target):
"""
display a boxplot graph of col in x axis and target in y axis
:param dataset_id: id of the dataset
:param df: dataframe, with col and target values
:param col: name of column
:param target: name of target column
:return:
"""
try:
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
encoder = LabelEncoder()
x = encoder.fit_transform(df[col].values)
x_labels = encoder.inverse_transform(list(range(max(x) + 1)))
fig, ax = plt.subplots(figsize=(8, 7))
sns.boxplot(x=col, y=target, data=df, ax=ax)
plt.xticks(list(range(max(x) + 1)), x_labels, rotation=90)
plt.ylim(__standard_range(df[target].values, 1, 99))
__save_fig(dataset_id, '_col_' + col, dark)
except:
log.error('error in graph_regression_categorical with dataset_id %s' % dataset_id)
def graph_classification_numerical(dataset_id, df, col, target):
"""
display a horizontal boxplot graph of col in x axis and target in y axis
:param dataset_id: id of the dataset
:param df: dataframe, with col and target values
:param col: name of column
:param target: name of target column
:return:
"""
try:
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
plt.figure(figsize=(8, 7))
encoder = LabelEncoder()
y = encoder.fit_transform(df[target].values)
y_labels = encoder.inverse_transform(list(range(max(y) + 1)))
sns.boxplot(x=col, y=target, data=df, orient='h')
plt.xlim(__standard_range(df[col].values, 1, 99))
plt.yticks(list(range(max(y) + 1)), y_labels)
__save_fig(dataset_id, '_col_' + col, dark)
except:
log.error('error in graph_classification_numerical with dataset_id %s' % dataset_id)
def graph_classification_categorical(dataset_id, df, col, target):
"""
display a heatmap of col in x axis and target in y axis
:param dataset_id: id of the dataset
:param df: dataframe, with col and target values
:param col: name of column
:param target: name of target column
:return:
"""
try:
for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
with plt.style.context(theme, after_reset=True):
df['count'] = 1
plt.figure(figsize=(8, 7))
# convert col and target in numerical
encoder = LabelEncoder()
x = encoder.fit_transform(df[col].values)
x_labels = encoder.inverse_transform(list(range(max(x) + 1)))
y = encoder.fit_transform(df[target].values)
y_labels = encoder.inverse_transform(list(range(max(y) + 1)))
data = pd.pivot_table(df[[col, target, 'count']], values='count', index=target, columns=col, aggfunc=np.sum)
sns.heatmap(data=data, cmap=sns.diverging_palette(220, 10, as_cmap=True), square=True)
plt.xticks([x + 0.5 for x in list(range(max(x) + 1))], x_labels, rotation=90)
plt.yticks([x + 0.5 for x in list(range(max(y) + 1))], y_labels, rotation=0)
__save_fig(dataset_id, '_col_' + col, dark)
except:
log.error('error in classification_categorical with dataset_id %s' % dataset_id)
def graph_text(dataset_id, df, col):
"""
display a wordcloud of the data of column col
:param dataset_id: id of the dataset
:param df: dataframe, with col values
:param col: name of column
:return:
"""
try:
if not import_wordcloud:
return
txt = " ".join([str(x) for x in df[col].values])
for dark, theme in [(True, 'black'), (False, 'white')]:
wc = WordCloud(background_color=theme, max_words=200, width=800, height=800)
# generate word cloud
wc.generate(txt)
if dark:
wc.to_file(get_dataset_folder(dataset_id) + '/graphs_dark/_col_%s.png' % col)
else:
wc.to_file(get_dataset_folder(dataset_id) + '/graphs/_col_%s.png' % col)
except:
log.error('error in graph_text with dataset_id %s' % dataset_id)
def __standard_range(x, pmin, pmax):
"""
calculate standard range with percentiles from pmin to pmax, eg. 1% to 99%
:param x: list of values or np.array of dim 1
:param pmin: percentile min (from 0 to 100)
:param pmax: percentile max (from 0 to 100)
:return: tuple (range_min, range_max)
"""
    y = np.sort(np.nan_to_num(x))
l = len(y)
p1 = int(l * pmin / 100.)
p2 = int(l * pmax / 100.) - 1
return y[p1], y[p2]
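    # e.g. __standard_range(np.arange(100), 1, 99) -> (1, 98): the values
    # sitting at the 1st and 99th percentile positions of the sorted array
    # (NaNs replaced by 0).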
def __save_fig(dataset_id, graph_name, dark):
"""
saves figure in graph directories, in 2 colors (dark and white)
:param dataset_id: dataset id
:param graph_name: name of the graph
:param dark: flag if dark theme
:return:
"""
if dark:
# dark transparent
plt.savefig(get_dataset_folder(dataset_id) + '/graphs_dark/%s.png' % graph_name, transparent=True)
else:
# white opaque
plt.savefig(get_dataset_folder(dataset_id) + '/graphs/%s.png' % graph_name, transparent=False)
plt.close()
def __save_cnf_matrix(dataset_id, round_id, part, y_names, cnf_matrix):
"""
save confusion matrix
:param dataset_id: dataset id
:param round_id: round id
:param part: 'eval' or 'test'
:param y_names: y labels
:param cnf_matrix: confusion matrix (actual / predict)
:return: None
"""
pickle.dump([y_names, cnf_matrix], open(get_dataset_folder(dataset_id) + '/predict/%s_%s_cnf.pkl' % (round_id, part), 'wb'))
def get_cnf_matrix(dataset_id, round_id, part):
"""
load confusion matrix
:param dataset_id: dataset id
:param round_id: round id
:param part: 'eval' or 'test'
:return: names, matrix, sums (of axis=1)
"""
try:
names, matrix = pickle.load(open(get_dataset_folder(dataset_id) + '/predict/%s_%s_cnf.pkl' % (round_id, part), 'rb'))
sums = np.sum(matrix, axis=1)
return names, matrix, sums
except:
return [], [], []
| mit |
mne-tools/mne-tools.github.io | dev/_downloads/ceb76325480611dc7a2e973a3b7a782c/20_dipole_fit.py | 5 | 5301 | # -*- coding: utf-8 -*-
"""
============================================================
Source localization with equivalent current dipole (ECD) fit
============================================================
This shows how to fit a dipole :footcite:`Sarvas1987` using mne-python.
For a comparison of fits between MNE-C and mne-python, see
`this gist <https://gist.github.com/larsoner/ca55f791200fe1dc3dd2>`__.
"""
from os import path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.forward import make_forward_dipole
from mne.evoked import combine_evoked
from mne.label import find_pos_in_annot
from mne.simulation import simulate_evoked
from nilearn.plotting import plot_anat
from nilearn.datasets import load_mni152_template
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
fname_surf_lh = op.join(subjects_dir, 'sample', 'surf', 'lh.white')
###############################################################################
# Let's localize the N100m (using MEG only)
evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
baseline=(None, 0))
evoked.pick_types(meg=True, eeg=False)
evoked_full = evoked.copy()
evoked.crop(0.07, 0.08)
# Fit a dipole
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]
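# (mne.fit_dipole returns a (dipole, residual) pair; indexing [0] keeps the
# fitted Dipole object and discards the residual evoked data)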
# Plot the result in 3D brain with the MRI image.
dip.plot_locations(fname_trans, 'sample', subjects_dir, mode='orthoview')
###############################################################################
# Plot the result in 3D brain with the MRI image using Nilearn
# In MRI coordinates and in MNI coordinates (template brain)
trans = mne.read_trans(fname_trans)
subject = 'sample'
mni_pos = mne.head_to_mni(dip.pos, mri_head_t=trans,
subject=subject, subjects_dir=subjects_dir)
mri_pos = mne.head_to_mri(dip.pos, mri_head_t=trans,
subject=subject, subjects_dir=subjects_dir)
# In the meantime let's find an anatomical label for the best fitted dipole
best_dip_id = dip.gof.argmax()
best_dip_mri_pos = mri_pos[best_dip_id]
label = find_pos_in_annot(best_dip_mri_pos, subject=subject,
subjects_dir=subjects_dir,
annot='aparc.a2009s+aseg')
# Draw dipole position on MRI scan and add anatomical label from parcellation
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
fig_T1 = plot_anat(t1_fname, cut_coords=mri_pos[0],
title=f'Dipole location: {label}')
template = load_mni152_template()
fig_template = plot_anat(template, cut_coords=mni_pos[0],
title='Dipole loc. (MNI Space)')
###############################################################################
# Calculate and visualise magnetic field predicted by dipole with maximum GOF
# and compare to the measured data, highlighting the ipsilateral (right) source
fwd, stc = make_forward_dipole(dip, fname_bem, evoked.info, fname_trans)
pred_evoked = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
# find time point with highest GOF to plot
best_idx = np.argmax(dip.gof)
best_time = dip.times[best_idx]
print('Highest GOF %0.1f%% at t=%0.1f ms with confidence volume %0.1f cm^3'
% (dip.gof[best_idx], best_time * 1000,
dip.conf['vol'][best_idx] * 100 ** 3))
# remember to create a subplot for the colorbar
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=[10., 3.4],
gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1],
top=0.85))
vmin, vmax = -400, 400 # make sure each plot has same colour range
# first plot the topography at the time of the best fitting (single) dipole
plot_params = dict(times=best_time, ch_type='mag', outlines='skirt',
colorbar=False, time_unit='s')
evoked.plot_topomap(time_format='Measured field', axes=axes[0], **plot_params)
# compare this to the predicted field
pred_evoked.plot_topomap(time_format='Predicted field', axes=axes[1],
**plot_params)
# Subtract predicted from measured data (apply equal weights)
diff = combine_evoked([evoked, pred_evoked], weights=[1, -1])
plot_params['colorbar'] = True
diff.plot_topomap(time_format='Difference', axes=axes[2:], **plot_params)
fig.suptitle('Comparison of measured and predicted fields '
'at {:.0f} ms'.format(best_time * 1000.), fontsize=16)
fig.tight_layout()
###############################################################################
# Estimate the time course of a single dipole with fixed position and
# orientation (the one that maximized GOF) over the entire interval
dip_fixed = mne.fit_dipole(evoked_full, fname_cov, fname_bem, fname_trans,
pos=dip.pos[best_idx], ori=dip.ori[best_idx])[0]
dip_fixed.plot(time_unit='s')
##############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
sangwook236/SWDT | sw_dev/python/ext/test/high_performance_computing/spark/pyspark_descriptive_statistics.py | 2 | 4298 | #!/usr/bin/env python
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, SQLContext
import pyspark.sql.types as types
import matplotlib.pyplot as plt
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
import traceback, sys
def describe_statistics():
spark = SparkSession.builder.appName('describe-statistics').getOrCreate()
sc = spark.sparkContext
sc.setLogLevel('WARN')
# Read a dataset in and remove its header.
fraud = sc.textFile('dataset/ccFraud.csv.gz')
header = fraud.first()
fraud = fraud.filter(lambda row: row != header).map(lambda row: [int(elem) for elem in row.split(',')])
# Create a schema.
    fields = [types.StructField(h[1:-1], types.IntegerType(), True) for h in header.split(',')]
schema = types.StructType(fields)
# Create a DataFrame.
fraud_df = spark.createDataFrame(fraud, schema)
fraud_df.printSchema()
# For a better understanding of categorical columns.
fraud_df.groupby('gender').count().show()
# Describe for the truly numerical features.
numerical = ['balance', 'numTrans', 'numIntlTrans']
desc = fraud_df.describe(numerical)
desc.show()
    # Check the skewness.
fraud_df.agg({'balance': 'skewness'}).show()
# Check the correlation between the features.
    print(fraud_df.corr('balance', 'numTrans'))
# Create a correlations matrix.
n_numerical = len(numerical)
corr = []
for i in range(0, n_numerical):
temp = [None] * i
for j in range(i, n_numerical):
temp.append(fraud_df.corr(numerical[i], numerical[j]))
corr.append(temp)
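    # 'corr' is an upper-triangular list of lists: row i holds None for the
    # first i entries, then the correlations of numerical[i] with itself and
    # with every later feature.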
def visualize_data():
#%matplotlib inline
plt.style.use('ggplot')
#output_notebook()
spark = SparkSession.builder.appName('visualize-data').getOrCreate()
sc = spark.sparkContext
sc.setLogLevel('WARN')
# Read a dataset in and remove its header.
fraud = sc.textFile('dataset/ccFraud.csv.gz')
header = fraud.first()
fraud = fraud.filter(lambda row: row != header).map(lambda row: [int(elem) for elem in row.split(',')])
# Create a schema.
    fields = [types.StructField(h[1:-1], types.IntegerType(), True) for h in header.split(',')]
schema = types.StructType(fields)
# Create a DataFrame.
fraud_df = spark.createDataFrame(fraud, schema)
# Histogram.
hists = fraud_df.select('balance').rdd.flatMap(lambda row: row).histogram(20)
# Plot a histogram (#1).
data = {
'bins': hists[0][:-1],
'freq': hists[1]
}
plt.bar(data['bins'], data['freq'], width=2000)
plt.title('Histogram of "balance"')
plt.show()
#p = figure(plot_height=600, plot_width=600, title='Histogram of "balance"', x_axis_label='bins', y_axis_label='freq')
#p.quad(bottom=0, top=data['bins'], left=data['freq'], right=None, fill_color='red', line_color='black')
#show(p)
# Plot a histogram (#2).
data_driver = {'obs': fraud_df.select('balance').rdd.flatMap(lambda row: row).collect()}
plt.hist(data_driver['obs'], bins=20)
plt.title('Histogram of "balance" using .hist()')
plt.show()
# Sample our dataset at 0.02%.
numerical = ['balance', 'numTrans', 'numIntlTrans']
data_sample = fraud_df.sampleBy('gender', {1: 0.0002, 2: 0.0002}).select(numerical)
# Plot a scatter plot.
data_multi = dict([(elem, data_sample.select(elem).rdd.flatMap(lambda row: row).collect()) for elem in numerical])
output_file('scatter.html')
p = figure(plot_width=400, plot_height=400)
p.circle(data_multi['balance'], data_multi['numTrans'], size=3, color='navy', alpha=0.5)
p.xaxis.axis_label = 'balance'
p.yaxis.axis_label = 'numTrans'
show(p)
def main():
describe_statistics()
visualize_data()
#%%------------------------------------------------------------------
# Usage:
# python pyspark_descriptive_statistics.py
# spark-submit pyspark_descriptive_statistics.py
# spark-submit --master local[4] pyspark_descriptive_statistics.py
# spark-submit --master spark://host:7077 --executor-memory 10g pyspark_descriptive_statistics.py
if '__main__' == __name__:
try:
main()
except:
#ex = sys.exc_info() # (type, exception object, traceback).
##print('{} raised: {}.'.format(ex[0], ex[1]))
#print('{} raised: {}.'.format(ex[0].__name__, ex[1]))
#traceback.print_tb(ex[2], limit=None, file=sys.stdout)
#traceback.print_exception(*sys.exc_info(), limit=None, file=sys.stdout)
traceback.print_exc(limit=None, file=sys.stdout)
| gpl-3.0 |
stargaser/astropy | astropy/visualization/wcsaxes/patches.py | 4 | 3408 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.patches import Polygon
from astropy import units as u
from astropy.coordinates.representation import UnitSphericalRepresentation
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product
__all__ = ['SphericalCircle']
def _rotate_polygon(lon, lat, lon0, lat0):
"""
Given a polygon with vertices defined by (lon, lat), rotate the polygon
such that the North pole of the spherical coordinates is now at (lon0,
lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the
polygon should initially be drawn around the North pole.
"""
# Create a representation object
polygon = UnitSphericalRepresentation(lon=lon, lat=lat)
# Determine rotation matrix to make it so that the circle is centered
# on the correct longitude/latitude.
m1 = rotation_matrix(-(0.5 * np.pi * u.radian - lat0), axis='y')
m2 = rotation_matrix(-lon0, axis='z')
transform_matrix = matrix_product(m2, m1)
# Apply 3D rotation
polygon = polygon.to_cartesian()
polygon = polygon.transform(transform_matrix)
polygon = UnitSphericalRepresentation.from_cartesian(polygon)
return polygon.lon, polygon.lat
class SphericalCircle(Polygon):
"""
Create a patch representing a spherical circle - that is, a circle that is
formed of all the points that are within a certain angle of the central
    coordinates on a sphere. Here we assume that latitude goes from -90 to
    +90 degrees.
This class is needed in cases where the user wants to add a circular patch
to a celestial image, since otherwise the circle will be distorted, because
a fixed interval in longitude corresponds to a different angle on the sky
depending on the latitude.
Parameters
----------
center : tuple or `~astropy.units.Quantity`
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
radius : `~astropy.units.Quantity`
The radius of the circle
resolution : int, optional
The number of points that make up the circle - increase this to get a
smoother circle.
vertex_unit : `~astropy.units.Unit`
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
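
    Examples
    --------
    A minimal sketch (assuming ``ax`` is an existing
    `~astropy.visualization.wcsaxes.WCSAxes` instance)::

        from astropy import units as u
        from astropy.visualization.wcsaxes import SphericalCircle

        circle = SphericalCircle((266.4 * u.deg, -29.0 * u.deg), 0.5 * u.deg,
                                 edgecolor='white', facecolor='none',
                                 transform=ax.get_transform('fk5'))
        ax.add_patch(circle)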
"""
def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = center
# Start off by generating the circle around the North pole
lon = np.linspace(0., 2 * np.pi, resolution + 1)[:-1] * u.radian
lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian
lon, lat = _rotate_polygon(lon, lat, longitude, latitude)
# Extract new longitude/latitude in the requested units
lon = lon.to_value(vertex_unit)
lat = lat.to_value(vertex_unit)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
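if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module):
    # draw a 5-degree spherical circle on a plain Matplotlib Axes. With
    # WCSAxes one would typically also pass transform=ax.get_transform(...).
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    circle = SphericalCircle((10 * u.deg, 45 * u.deg), 5 * u.deg,
                             edgecolor='red', facecolor='none')
    ax.add_patch(circle)
    ax.set_xlim(0, 20)
    ax.set_ylim(35, 55)
    plt.show()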
| bsd-3-clause |
SU-ECE-17-7/hotspotter | hstest/warp_parallel.py | 2 | 6147 | '''
There is an issue with cv2.warpAffine on macs.
This is a test to further investigate the issue.
python -c "import cv2; help(cv2.warpAffine)"
'''
from __future__ import division, print_function
#import matplotlib
#matplotlib.use('Qt4Agg')
import os
import sys
from os.path import dirname, join, expanduser, exists
from PIL import Image
import numpy as np
import multiprocessing
import cv2
from itertools import izip
sys.path.append(join(expanduser('~'), 'code'))
from hotspotter import helpers
#from hotspotter import chip_compute2 as cc2
#from hotspotter import Parallelize as parallel
#from hotspotter.dbgimport import *
#import PyQt4
#from PyQt4 import QtCore
#from PyQt4 import QtGui
#from PyQt4 import QtCore, QtGui
#from PyQt4.Qt import (QAbstractItemModel, QModelIndex, QVariant, QWidget,
#Qt, QObject, pyqtSlot, QKeyEvent)
def try_get_path(path_list):
tried_list = []
for path in path_list:
if path.find('~') != -1:
path = expanduser(path)
tried_list.append(path)
if exists(path):
return path
return (False, tried_list)
def get_lena_fpath():
possible_lena_locations = [
'lena.png',
'~/code/hotspotter/_tpl/extern_feat/lena.png',
'_tpl/extern_feat/lena.png',
'../_tpl/extern_feat/lena.png',
'~/local/lena.png',
'../lena.png',
'/lena.png',
'C:\\lena.png']
lena_fpath = try_get_path(possible_lena_locations)
if not isinstance(lena_fpath, str):
raise Exception('cannot find lena: tried: %r' % (lena_fpath,))
return lena_fpath
try:
test_dir = join(dirname(__file__))
except NameError as ex:
test_dir = os.getcwd()
# Get information
gfpath = get_lena_fpath()
cfpath = join(test_dir, 'tmp_chip.png')
roi = [0, 0, 100, 100]
new_size = (500, 500)
theta = 0
img_path = gfpath
# parallel tasks
nTasks = 20
gfpath_list = [gfpath] * nTasks
cfpath_list = [cfpath+str(ix)+'.png' for ix in xrange(nTasks)]
roi_list = [roi] * nTasks
theta_list = [theta] * nTasks
chipsz_list = [new_size] * nTasks
printDBG = print
def imread2(img_fpath):
try:
img = Image.open(img_fpath)
img = np.asarray(img)
#img = skimage.util.img_as_uint(img)
except Exception as ex:
print('[io] Caught Exception: %r' % ex)
print('[io] ERROR reading: %r' % (img_fpath,))
raise
return img
def _calculate2(func, args):
#printDBG('[parallel] * %s calculating...' % (multiprocessing.current_process().name,))
result = func(*args)
#arg_names = func.func_code.co_varnames[:func.func_code.co_argcount]
#arg_list = [n+'='+str(v) for n,v in izip(arg_names, args)]
#arg_str = '\n *** '+str('\n *** '.join(arg_list))
#printDBG('[parallel] * %s finished:\n ** %s' %
#(multiprocessing.current_process().name,
#func.__name__))
return result
def _worker2(input, output):
printDBG('[parallel] START WORKER input=%r output=%r' % (input, output))
for func, args in iter(input.get, 'STOP'):
#printDBG('[parallel] worker will calculate %r' % (func))
result = _calculate2(func, args)
#printDBG('[parallel] worker has calculated %r' % (func))
output.put(result)
#printDBG('[parallel] worker put result in queue.')
#printDBG('[parallel] worker is done input=%r output=%r' % (input, output))
def mark_progress2(cout):
sys.stdout.write('.')
sys.stdout.flush()
pass
def _compute_in_parallel2(task_list, num_procs, task_lbl='', verbose=True):
'''
Input: task list: [ (fn, args), ... ]
'''
task_queue = multiprocessing.Queue()
done_queue = multiprocessing.Queue()
nTasks = len(task_list)
# queue tasks
for task in iter(task_list):
task_queue.put(task)
# start processes
proc_list = []
for i in xrange(num_procs):
printDBG('[parallel] creating process %r' % (i,))
proc = multiprocessing.Process(target=_worker2, args=(task_queue, done_queue))
proc.start()
        proc_list.append(proc)
# wait for results
printDBG('[parallel] waiting for results')
sys.stdout.flush()
result_list = []
if verbose:
mark_progress = helpers.progress_func(nTasks, lbl=task_lbl)
for count in xrange(len(task_list)):
#printDBG('[parallel] done_queue.get()')
mark_progress(count)
result_list.append(done_queue.get())
print('')
else:
for i in xrange(nTasks):
done_queue.get()
print('[parallel] ... done')
printDBG('[parallel] stopping children')
# stop children processes
for i in xrange(num_procs):
task_queue.put('STOP')
return result_list
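# Illustrative call (sketch, using picklable builtins only):
#     _compute_in_parallel2([(max, (1, 2)), (max, (3, 4))], num_procs=2)
# distributes the two tasks over two worker processes and returns [2, 4]
# (results arrive in completion order, so the ordering may vary).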
from hotspotter import fileio as io
def extract_chip2(img_path, roi, theta, new_size):
'Crops chip from image ; Rotates and scales; Converts to grayscale'
# Read parent image
np_img = io.imread(img_path)
# Build transformation
(rx, ry, rw, rh) = roi
(rw_, rh_) = new_size
Aff = np.array([[ 2., 0., 0.],
[ 0., 1., 0.]])
print('built transform Aff=\n%r' % Aff)
# Rotate and scale
#chip = cv2.warpAffine(np_img, Aff, (rw_, rh_), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
chip = cv2.warpAffine(np_img, Aff, (rw_, rh_))
#print('warped')
return chip
def parallel_compute2(func, arg_list, num_procs):
task_list = [(func, _args) for _args in izip(*arg_list)]
for task in task_list:
print(task)
    result_list = _compute_in_parallel2(task_list, num_procs)
    for result in result_list:
        print(result)
    return result_list
extract_arg_list = [gfpath_list, roi_list, theta_list, chipsz_list]
compute_arg_list = [gfpath_list, cfpath_list, roi_list, theta_list, chipsz_list]
num_procs = 4
results = parallel_compute2(extract_chip2, extract_arg_list, num_procs)
#results = parallel.parallel_compute(compute_chip2, compute_arg_list,
#num_procs, lazy=False, common_args=[[]])
print(results)
#from hotspotter import draw_func2 as df2
#df2.imshow(result_list[0])
#exec(df2.present())
| apache-2.0 |
JanetMatsen/Machine_Learning_CSE_546 | HW3/code/not_updated/ridge_regression.py | 2 | 12001 | import numpy as np
import pandas as pd
import scipy.sparse as sp
import scipy.sparse.linalg as splin
import time
from classification_base import ClassificationBase
class RidgeMulti(ClassificationBase):
"""
Train multiple ridge models.
"""
def __init__(self, X, y, lam, W=None, verbose=False, sparse=True,
test_X=None, test_y = None, kernelized=False):
"""
test_X, test_y are for compatibility only, because the questions for
other methods require knowing test data during fitting.
"""
super(RidgeMulti, self).__init__(X=X, y=y, W=W, sparse=sparse)
self.sparse = sparse
if self.sparse:
assert lam != 0, "can't invert the big stuff with lambda = 0."
self.X = sp.csc_matrix(self.X)
self.Y = sp.csc_matrix(self.Y)
self.lam = lam
self.W = None # don't want to have W before solving!
self.matrix_work = None
self.verbose = verbose
self.kernelized = kernelized
def get_weights(self):
if self.sparse:
return self.W.toarray()
if not self.sparse:
return self.W
def apply_weights(self):
if self.verbose:
print("Apply weights to H(X): {}".format(time.asctime(time.localtime(time.time()))))
# Apply weights
if self.sparse:
assert type(self.W) == sp.csc_matrix or type(self.W) == sp.csr_matrix, \
"type of W is {}".format(type(self.W))
assert type(self.X) == sp.csc_matrix, \
"type of W is {}".format(type(self.X))
prod = self.X.dot(self.W)
if self.verbose:
print("Done applying weights to H(X): {}".format(time.asctime(time.localtime(time.time()))))
if type(prod) == sp.csc_matrix:
return prod.toarray()
else:
return prod
else:
if self.verbose:
print("Done applying weights to H(X): {}".format(time.asctime(time.localtime(time.time()))))
return self.X.dot(self.W)
def optimize(self):
# When solving multiclass, (X^TX + lambdaI)-1X^T is shared
# solve it once and share it with all the regressors.
# find lambda*I_D + X^T*X
if self.verbose: print("optimize: multiply matrices before inversion.")
# Get (X^TX + lambdaI)
if self.sparse:
piece_to_invert = sp.csc_matrix(sp.identity(self.d)*self.lam) + \
self.X.T.dot(self.X)
else:
piece_to_invert = np.identity(self.d)*self.lam + self.X.T.dot(self.X)
assert piece_to_invert.shape == (self.d, self.d)
# Invert (X^TX + lambdaI)
if self.verbose:
print("invert matrix:")
print("time: {}".format(time.asctime(time.localtime(time.time()))))
if self.sparse:
inverted_piece = splin.inv(piece_to_invert)
else:
inverted_piece = np.linalg.inv(piece_to_invert)
# Dot with X^T
if self.verbose:
print("time: {}".format(time.asctime(time.localtime(time.time()))))
print("dot with X^T:")
self.matrix_work = inverted_piece.dot(self.X.T)
assert self.matrix_work.shape == (self.d, self.N)
if self.verbose:
print("train the {} classifiers:".format(self.C))
# Train C classifiers.
self.W = self.matrix_work.dot(self.Y)
if self.verbose:
print("done generating weights.")
assert self.W.shape == (self.d, self.C)
return self.W
def kernelized_optimize(self):
# fact: H^T(HH^T + lambda*I_N) == (lambda*I_d + H^TH)H^T
# instead of inverting a dxd matrix, we invert an nxn matrix.
# So our ridge formula becomes:
# (lambda*I_d + H^TH)^(-1)H^T = H^T(HH^T + lambdaI_N)^(-1)
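        # Quick numeric sanity check of that identity (illustrative sketch,
        # not part of the original assignment code):
        #     H = np.random.randn(5, 3); lam = 0.1
        #     lhs = np.linalg.inv(lam * np.eye(3) + H.T.dot(H)).dot(H.T)
        #     rhs = H.T.dot(np.linalg.inv(H.dot(H.T) + lam * np.eye(5)))
        #     np.allclose(lhs, rhs)  # -> True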
if self.sparse:
piece_to_invert = self.X.dot(self.X.T) + sp.identity(self.N)*self.lam
else:
piece_to_invert = self.X.dot(self.X.T) + np.identity(self.N)*self.lam
assert piece_to_invert.shape == (self.N, self.N) # yay!
# invert this NxN matrix.
if self.verbose:
print("invert matrix:")
print("time: {}".format(time.asctime(time.localtime(time.time()))))
if self.sparse:
inverted_piece = splin.inv(piece_to_invert)
else:
inverted_piece = np.linalg.inv(piece_to_invert)
if self.verbose:
print("done inverting via kernel trick at time: {}".format(time.asctime(time.localtime(time.time()))))
# dot with H^T.dot(y)
if self.verbose:
print("dot with H^T at time: {}".format(time.asctime(time.localtime(time.time()))))
self.W = self.X.T.dot(inverted_piece).dot(self.Y)
if self.verbose:
print("done dotting with H^T at time: {}".format(time.asctime(time.localtime(time.time()))))
assert self.W.shape == (self.d, self.C)
def predict(self):
if self.verbose:
print("prediction time.")
if self.W is None:
if self.kernelized:
self.kernelized_optimize()
else:
self.optimize()
Yhat = self.apply_weights()
assert type(Yhat) == np.ndarray
classes = np.argmax(Yhat, axis=1)
if self.sparse:
yhat = np.multiply(self.Y.toarray(), Yhat)
else:
yhat = np.multiply(self.Y, Yhat)
# collapse it into an Nx1 array:
self.yhat = np.amax(yhat, axis=1)
return classes
def run(self):
self.predict()
self.results = pd.DataFrame(self.results_row())
def loss_01(self):
return self.pred_to_01_loss(self.predict())
def results_row(self):
"""
Return a dictionary that can be put into a Pandas DataFrame.
"""
results_row = super(RidgeMulti, self).results_row()
# append on Ridge regression-specific results
more_details = {
"lambda":[self.lam],
"training SSE":[self.sse()],
"training RMSE":[self.rmse()],
"kernelized solvin":[self.kernelized]
}
results_row.update(more_details)
return results_row
def sse(self):
"""
Calculate the sum of squared errors.
In class on 10/26, Sham coached us to include errors for all
classifications in our RMSE (and thus SSE) calculations.
For y = [0, 1], Y=[[0, 1], [1, 0]], Yhat = [[0.01, 0.95], [0.99, 0.03]],
SSE = sum(0.01**2 + 0.05**2 + 0.01**2 + 0.03**2) = RSS
Note: this would not be equivalent to the binary classifier, which
would only sum (0.05**2 + 0.03**2)
My formula before only used the errors for the correct class:
error = self.apply_weights() - self.Y
error = np.multiply(error, self.Y)
error = np.amax(np.abs(error), axis=1)
return error.T.dot(error)
:return: sum of squared errors for all classes for each point (float)
"""
if self.sparse:
error = self.apply_weights() - self.Y.toarray()
assert type(error) == np.ndarray
else:
error = self.apply_weights() - self.Y
return np.multiply(error, error).sum()
def rmse(self):
"""
For the binary classifier, RMSE = (SSE/N)**0.5.
For the multiclass one, SSE is counting errors for all classifiers.
We could use (self.sse()/self.N/self.C)**0.5 to make the RMSE
calcs more similar between the binary and multi-class classifiers,
but they still are not the same, so I won't.
:return: RMSE (float)
"""
return(self.sse()/self.N)**0.5
class RidgeBinary(ClassificationBase):
"""
Train *one* ridge model.
"""
def __init__(self, X, y, lam, w=None, test_X=None, test_y = None):
"""
test_X, test_y are for compatibility only, because the questions for
other methods require knowing test data during fitting.
"""
self.X = X
self.N, self.d = X.shape
self.y = y
self.lam = lam
        if w is None:
            self.w = np.zeros(self.d)
        else:
            self.w = w
        assert self.w.shape == (self.d, )
        self.threshold = None
        # predict() checks self.verbose, so give it a sensible default here.
        self.verbose = False
def get_weights(self):
return self.w
def apply_weights(self):
return self.X.dot(self.w)
def run(self):
# find lambda*I_D + X^T*X
piece_to_invert = np.identity(self.d)*self.lam + self.X.T.dot(self.X)
inverted_piece = np.linalg.inv(piece_to_invert)
solution = inverted_piece.dot(self.X.T)
solution = solution.dot(self.y)
solution = np.squeeze(np.asarray(solution))
assert solution.shape == (self.d, )
self.w = solution
self.results = pd.DataFrame(self.results_row())
def predict(self, threshold):
if self.verbose:
print("dot X with W to make predictions. {}".format(time.asctime(time.localtime(time.time()))))
# TODO: having a default cutoff is a terrible idea!
Yhat = self.X.dot(self.w)
if self.verbose:
print("done dotting. {}".format(time.asctime(time.localtime(time.time()))))
classes = np.zeros(self.N)
classes[Yhat > threshold] = 1
return classes
def loss_01(self, threshold=None):
if threshold is None:
threshold=0.5
print("WARNING: 0/1 loss is calculated for threshold=0.5, which "
"is very likely to be a poor choice!!")
return self.pred_to_01_loss(self.predict(threshold))
def results_row(self):
"""
Return a dictionary that can be put into a Pandas DataFrame.
"""
results_row = super(RidgeBinary, self).results_row()
# append on logistic regression-specific results
more_details = {
"lambda":[self.lam],
"SSE":[self.sse()],
"RMSE":[self.rmse()],
}
results_row.update(more_details)
return results_row
def sse(self):
# sse = RSS
error = self.apply_weights() - self.y
return error.T.dot(error)
def rmse(self):
return(self.sse()/self.N)**0.5
class RidgeRegularizationPath:
""" DEPRECATED """
# TODO: refactor so it uses HyperparameterSweep class
def __init__(self, train_X, train_y, lam_max, frac_decrease, steps,
val_X, val_y):
self.train_X = train_X
self.train_y = train_y
self.train_N, self.train_d = train_X.shape
self.lam_max = lam_max
self.frac_decrease = frac_decrease
self.steps = steps
self.val_X = val_X
self.val_y = val_y
def train_with_lam(self, lam):
rr = RidgeBinary(self.train_X, self.train_y, lam=lam)
        rr.run()  # RidgeBinary exposes run(), not solve()
sse_train = rr.sse()
        # replace the y values with the validation y and get the val SSE
rr.X = self.val_X
rr.y = self.val_y
sse_val = rr.sse()
assert rr.w.shape == (self.train_d, 1) # check before we slice out
return rr.w.toarray()[:,0], sse_train, sse_val
def walk_path(self):
# protect the first value of lambda.
lam = self.lam_max/self.frac_decrease
# initialize a dataframe to store results in
results = pd.DataFrame()
for c in range(0, self.steps):
lam = lam*self.frac_decrease
print("Loop {}: solving weights. Lambda = {}".format(c+1, lam))
w, sse_train, sse_val = self.train_with_lam(lam)
one_val = pd.DataFrame({"lam":[lam],
"weights":[w],
"SSE (training)": [sse_train],
"SSE (validaton)": [sse_val]})
results = pd.concat([results, one_val])
self.results_df = results
| mit |
robin-lai/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
Nehoroshiy/urnn | examples/lasagne_rnn.py | 1 | 8511 | import theano
import lasagne
import numpy as np
import theano.tensor as T
from numpy import random as rnd, linalg as la
from layers import UnitaryLayer, UnitaryKronLayer, RecurrentUnitaryLayer, ComplexLayer, WTTLayer, ModRelu
from matplotlib import pyplot as plt
from utils.optimizations import nesterov_momentum, custom_sgd
from lasagne.nonlinearities import rectify
np.set_printoptions(linewidth=200, suppress=True)
#theano.config.exception_verbosity='high'
#theano.config.mode='DebugMode'
#theano.config.optimizer='None'
# Min/max sequence length
MIN_LENGTH = 50
MAX_LENGTH = 51
# Number of units in the hidden (recurrent) layer
N_HIDDEN = 81
# Number of training sequences in each batch
N_BATCH = 100
# Optimization learning rate
LEARNING_RATE = 3 * 1e-4
# All gradients above this will be clipped
GRAD_CLIP = 100
# How often should we check the output?
EPOCH_SIZE = 100
# Number of epochs to train the net
NUM_EPOCHS = 600
# Exact sequence length
TIME_SEQUENCES=100
def gen_data(min_length=MIN_LENGTH, max_length=MAX_LENGTH, n_batch=N_BATCH):
'''
Generate a batch of sequences for the "add" task, e.g. the target for the
following
``| 0.5 | 0.7 | 0.3 | 0.1 | 0.2 | ... | 0.5 | 0.9 | ... | 0.8 | 0.2 |
| 0 | 0 | 1 | 0 | 0 | | 0 | 1 | | 0 | 0 |``
would be 0.3 + .9 = 1.2. This task was proposed in [1]_ and explored in
e.g. [2]_.
Parameters
----------
min_length : int
Minimum sequence length.
max_length : int
Maximum sequence length.
n_batch : int
Number of samples in the batch.
Returns
-------
X : np.ndarray
Input to the network, of shape (n_batch, max_length, 2), where the last
dimension corresponds to the two sequences shown above.
y : np.ndarray
Correct output for each sample, shape (n_batch,).
mask : np.ndarray
A binary matrix of shape (n_batch, max_length) where ``mask[i, j] = 1``
when ``j <= (length of sequence i)`` and ``mask[i, j] = 0`` when ``j >
(length of sequence i)``.
References
----------
.. [1] Hochreiter, Sepp, and Jürgen Schmidhuber. "Long short-term memory."
Neural computation 9.8 (1997): 1735-1780.
.. [2] Sutskever, Ilya, et al. "On the importance of initialization and
momentum in deep learning." Proceedings of the 30th international
conference on machine learning (ICML-13). 2013.
'''
# Generate X - we'll fill the last dimension later
X = np.concatenate([np.random.uniform(size=(n_batch, max_length, 1)),
np.zeros((n_batch, max_length, 1))],
axis=-1)
mask = np.zeros((n_batch, max_length), dtype='int32')
y = np.zeros((n_batch,))
# Compute masks and correct values
for n in range(n_batch):
# Randomly choose the sequence length
length = np.random.randint(min_length, max_length)
# Make the mask for this sample 1 within the range of length
mask[n, :length] = 1
# Zero out X after the end of the sequence
X[n, length:, 0] = 0
# Set the second dimension to 1 at the indices to add
X[n, np.random.randint(length/10), 1] = 1
X[n, np.random.randint(length/2, length), 1] = 1
# Multiply and sum the dimensions of X to get the target value
y[n] = np.sum(X[n, :, 0]*X[n, :, 1])
# Center the inputs and outputs
X -= X.reshape(-1, 2).mean(axis=0)
y -= y.mean()
return (X.astype(theano.config.floatX), y.astype(theano.config.floatX),
mask.astype('int32'))
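# Shape sketch (illustrative): gen_data(50, 55, n_batch=4) returns X of shape
# (4, 55, 2), y of shape (4,) and mask of shape (4, 55).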
def main(n_iter, n_batch, n_hidden, time_steps, learning_rate, savefile, model, input_type, out_every_t, loss_function):
n_input = 2
n_output = 1
n_train = 100000
n_test = 10000
num_batches = n_train // n_batch
# --- Create data --------------------
train_x, train_y, train_mask = gen_data(min_length=time_steps, max_length=time_steps + 1, n_batch=n_train)
test_x, test_y, val_mask = gen_data(min_length=time_steps, max_length=time_steps + 1, n_batch=n_test)
s_train_x = theano.shared(train_x)
s_train_y = theano.shared(train_y)
s_test_x = theano.shared(test_x)
s_test_y = theano.shared(test_y)
gradient_clipping = np.float32(1)
learning_rate = theano.shared(np.array(learning_rate, dtype=theano.config.floatX))
# building network
l_in = lasagne.layers.InputLayer(shape=(None, MAX_LENGTH, N_INPUT))
l_mask = lasagne.layers.InputLayer(shape=(None, MAX_LENGTH),input_var=T.imatrix("mask"))
l_in_hid = lasagne.layers.DenseLayer(lasagne.layers.InputLayer((None, N_INPUT)), N_HIDDEN * 2)
# building hidden-hidden recurrent layer
if model == "":
pass
if __name__ == "__main__":
print("Building network ...")
N_INPUT=2
learning_rate = theano.shared(np.array(LEARNING_RATE, dtype=theano.config.floatX))
# input layer of shape (n_batch, n_timestems, n_input)
l_in = lasagne.layers.InputLayer(shape=(None, MAX_LENGTH, N_INPUT))
# mask of shape (n_batch, n_timesteps)
l_mask = lasagne.layers.InputLayer(shape=(None, MAX_LENGTH),input_var=T.imatrix("mask"))
# define input-to-hidden and hidden-to-hidden linear transformations
l_in_hid = lasagne.layers.DenseLayer(lasagne.layers.InputLayer((None, N_INPUT)), N_HIDDEN * 2)
#l_hid_hid = ComplexLayer(lasagne.layers.InputLayer((None, N_HIDDEN * 2)))
l_hid_hid = UnitaryLayer(lasagne.layers.InputLayer((None, N_HIDDEN * 2)))
manifolds = {}
#l_hid_hid = WTTLayer(lasagne.layers.InputLayer((None, N_HIDDEN * 2)), [3]*4, [2]*3)
manifold = l_hid_hid.manifold
if not isinstance(manifold, list):
manifold = [manifold]
manifolds = {man.str_id: man for man in manifold}
#manifolds = {}
# recurrent layer using linearities defined above
l_rec = RecurrentUnitaryLayer(l_in, l_in_hid, l_hid_hid, nonlinearity=ModRelu(lasagne.layers.InputLayer((None, N_HIDDEN * 2))),
mask_input=l_mask, only_return_final=True)
print(lasagne.layers.get_output_shape(l_rec))
# nonlinearity for recurrent layer output
#l_nonlin = ModRelu(l_rec)
#print(lasagne.layers.get_output_shape(l_nonlin))
l_reshape = lasagne.layers.ReshapeLayer(l_rec, (-1, N_HIDDEN * 2))
print(lasagne.layers.get_output_shape(l_reshape))
# Our output layer is a simple dense connection, with 1 output unit
l_dense = lasagne.layers.DenseLayer(l_reshape, num_units=1, nonlinearity=None)
l_out = lasagne.layers.ReshapeLayer(l_dense, (N_BATCH, -1))
print(lasagne.layers.get_output_shape(l_out))
target_values = T.vector('target_output')
# lasagne.layers.get_output produces a variable for the output of the net
network_output = lasagne.layers.get_output(l_out)
predicted_values = network_output.flatten()
# Our cost will be mean-squared error
cost = T.mean((predicted_values - target_values)**2)
# Retrieve all parameters from the network
all_params = lasagne.layers.get_all_params(l_out,trainable=True)
print(all_params)
print(lasagne.layers.get_all_params(l_rec))
# Compute SGD updates for training
print("Computing updates ...")
updates = custom_sgd(cost, all_params, LEARNING_RATE, manifolds)
# Theano functions for training and computing cost
print("Compiling functions ...")
train = theano.function([l_in.input_var, target_values, l_mask.input_var],
cost, updates=updates, on_unused_input='warn')
compute_cost = theano.function(
[l_in.input_var, target_values, l_mask.input_var], cost, on_unused_input='warn')
# We'll use this "validation set" to periodically check progress
X_val, y_val, mask_val = gen_data(n_batch=100)
#TEST
#ll = lasagne.layers.InputLayer((None, N_HIDDEN * 2))
#v = ModRelu(ll)
#v_out =lasagne.layers.get_output(v)
#print(T.grad(v_out.mean(),ll.input_var).eval({ll.input_var: np.zeros([5,N_HIDDEN*2])})) #with ones its okay
#TEST
try:
for epoch in range(NUM_EPOCHS):
if (epoch + 1) % 100 == 0:
learning_rate.set_value(learning_rate.get_value() * 0.9)
cost_val = compute_cost(X_val, y_val, mask_val)
for _ in range(EPOCH_SIZE):
X, y, m = gen_data()
train(X, y, m.astype('int32'))
print("Epoch {} validation cost = {}".format(epoch, cost_val))
except KeyboardInterrupt:
pass
| mit |
NeoBoy/STSP_IIUI-Spring2016 | Task2/nnet.py | 1 | 11003 | # -*- coding: utf-8 -*-
"""
The goal of this file is to design a class for Neural Networks
@author: Sharjeel Abid Butt
@References
1. http://ufldl.stanford.edu/wiki/index.php/Backpropagation_Algorithm
2. https://grantbeyleveld.wordpress.com/2015/10/09/implementing-a-artificial-neural-network-in-python/
3. http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch
4. http://www.bogotobogo.com/python/python_Neural_Networks_Backpropagation_for_XOR_using_one_hidden_layer.php
5. https://medium.com/learning-new-stuff/how-to-learn-neural-networks-758b78f2736e#.y5mgkr1zw
6. http://jeremykun.com/2012/12/09/neural-networks-and-backpropagation/
7. https://metacademy.org/graphs/concepts/backpropagation
"""
import mnist_load as mload
import copy
import numpy as np
#import scipy as sp
#import scipy.stats as stats
#import pandas as pd
#import matplotlib as mpl
import matplotlib.pyplot as plt
def tic():
#Homemade version of matlab tic and toc functions
import time
global startTime_for_tictoc
startTime_for_tictoc = time.time()
def toc():
import time
if 'startTime_for_tictoc' in globals():
print "Elapsed time is " + str(time.time() - startTime_for_tictoc) + " seconds."
else:
print "Toc: start time not set"
def dataExtraction(data = 'train', class1 = 1, class0 = 0):
[data, labels] = mload.load(data)
y1 = np.extract(labels == class1, labels)
X1 = data[labels == class1, :, :]
y0 = np.extract(labels == class0, labels)
X0 = data[labels == class0, :, :]
y = np.concatenate((y1, y0), axis = 0)
X = np.concatenate((X1, X0), axis = 0)
X = np.reshape(X, (np.shape(X)[0], np.shape(X)[1] * np.shape(X)[2]))
X = (X - np.mean(X, axis = 0)) / (1 + np.std(X, axis = 0)) # Data Normalization
y[y == class1] = 1
y[y == class0] = 0
y = np.reshape(y, (np.shape(X)[0], 1))
return y, X
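# Example (sketch): labels, data = dataExtraction('train', 1, 0) returns labels
# of shape (N, 1) with values in {0, 1} and data with one normalized, flattened
# 28x28 MNIST image (784 features) per row.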
class nnet(object):
"""
A class that implements Basic Neural Networks Architecture
"""
def __init__(self, noOfInputs = 2, noOfLayers = 2, nodesInEachLayer = [2, 2],
noOfOutputs = 2, activationFunction = 'sigmoid',
parametersRange = [-1, 1]):
"""
Creates a Neural Network
"""
if (len(nodesInEachLayer) != noOfLayers):
raise ValueError('Incorrect Parameters provided!')
self.n_I = noOfInputs
self.n_L = noOfLayers
self.n_H = nodesInEachLayer
self.n_O = noOfOutputs
self.a_Func = activationFunction
self.pR = parametersRange
#self.Nstruct = [noOfInputs, nodesInEachLayer, noOfOutputs]
self.Theta = []
self.nodes = []
# self.nodes.append(np.zeros((noOfInputs, 1)))
lmin, lmax = parametersRange
for l in range(noOfLayers + 1):
if l == 0:
tempTheta = self.randTheta(lmin, lmax, noOfInputs, self.n_H[l])
elif l == noOfLayers:
tempTheta = self.randTheta(lmin, lmax, self.n_H[-1], noOfOutputs)
else:
tempTheta = self.randTheta(lmin, lmax, self.n_H[l - 1], self.n_H[l])
tempNode = np.shape(tempTheta)[1]
self.Theta.append(tempTheta)
self.nodes.append(tempNode)
def __str__(self):
return "This neural network has a " + str(self.n_I) + ' X ' + \
str(self.n_H) + ' X ' + str(self.n_O) + " structure."
def randTheta(self, l_min, l_max, i_nodes, o_nodes):
theta = l_min + np.random.rand(i_nodes + 1, o_nodes) * (l_max - l_min)
return theta
def sigmoid(self, z, derivative = False):
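        # Note: when derivative=True, z is assumed to already be the sigmoid
        # *output* (the activation), so z * (1 - z) is the derivative of the
        # sigmoid with respect to its input.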
if derivative:
return z * (1 - z)
S = 1.0 / (1.0 + np.exp(-z))
return S
def setTheta(self, thetaLayer = 1, thetaIndex = [1, 0], allSet = False):
"""
Updates the Theta values of the Neural Network
"""
if allSet:
for l in range(self.n_L + 1):
print '\n\nEnter Theta values for Layer ' + str(l + 1) + ':\n'
in_nodes, out_nodes = np.shape(self.Theta[l])
for inIndex in range(in_nodes):
for outIndex in range(out_nodes):
self.Theta[l][inIndex][outIndex] = float (raw_input( \
'Enter Theta[' + str(outIndex + 1) + '][' + str(inIndex) +']:'))
else:
outIndex, inIndex = thetaIndex
self.Theta[thetaLayer - 1][inIndex][outIndex - 1] = float (raw_input( \
'Enter Theta[' + str(outIndex) + '][' + str(inIndex) +']:'))
print '\n\n\nTheta Update Complete.\n\n\n'
def getTheta(self):
return copy.deepcopy(self.Theta)
def forward_pass(self, nodes, X, y):
"""
Does the forward pass stage of Backpropagation
"""
# raise NotImplementedError
m = np.shape(y)[0]
for l in range(self.n_L + 1):
if l == 0:
node_in = np.concatenate((np.ones((m, 1)), X), axis = 1)
else:
node_in = np.concatenate((np.ones((m, 1)), nodes[l - 1]), axis = 1)
node_out = np.dot(node_in, self.Theta[l])
if self.a_Func == 'sigmoid':
nodes[l] = self.sigmoid(node_out)
return nodes
def backward_pass(self, delta, nodes, X, y, grad, Lambda, quadLoss):
"""
Does the Backpass stage of Backpropagation
"""
# raise NotImplementedError
m = np.shape(y)[0]
if self.a_Func == 'sigmoid':
delta[-1] = (nodes[-1] - y)
if quadLoss:
delta[-1] *= self.sigmoid(nodes[-1], True)
for l in range(self.n_L - 1, -1, -1):
delta[l] = np.dot(delta[l + 1], self.Theta[l + 1][1:].T) \
* self.sigmoid(nodes[l], True)
for l in range(self.n_L + 1):
if l == 0:
Xconcate = np.concatenate((np.ones((m, 1)), X), axis = 1)
grad[l] = np.dot(Xconcate.T, delta[l])
else:
nodeConcated = np.concatenate((np.ones((m, 1)), nodes[l - 1]), axis = 1)
grad[l] = np.dot(nodeConcated.T, delta[l])
if Lambda != 0:
grad[l][1:] += Lambda * self.Theta[l][1:]
return grad, delta
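    # Gradient check (illustrative sketch, not part of the original code): the
    # analytic gradients returned above can be verified numerically. For a
    # single entry Theta[l][i, j], perturb it by +/- eps (e.g. eps = 1e-4),
    # recompute the regularized loss via a forward pass each time, and compare
    #     numeric_grad = (loss_plus - loss_minus) / (2 * eps)
    # against grad[l][i, j] / m; the two should agree to several decimal places.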
def trainNNET(self, data, labels, stoppingCriteria = 1e-3, LearningRate = 1e-1,
Lambda = 0, noOfIterations = 1000, quadLoss = False, moreDetail = False):
"""
Does the training of the Neural Network
"""
# raise NotImplementedError
if (np.shape(data)[0] != np.shape(labels)[0] or \
np.shape(data)[1] != self.n_I or \
np.shape(labels)[1] != self.n_O):
raise ValueError('Data is not suitable for this neural network')
m = np.shape(labels)[0]
nodes = []
delta = []
grad = []
eV = []
print 'Training Started:'
for l in range(self.n_L + 1):
nodes.append(np.zeros((m, self.nodes[l])))
delta.append(np.zeros((m, self.nodes[l])))
            grad.append(np.zeros(np.shape(self.Theta[l])))  # placeholder, filled in backward_pass
print "Epoch \t Error"
for epoch in range(noOfIterations):
nodes = self.forward_pass(nodes, data, labels)
labels_hat = nodes[-1]
if quadLoss:
error = np.sum((labels_hat - labels) ** 2) / (2.0 * m)
else:
error = - np.sum(labels * np.log(labels_hat) + \
(1.0 - labels) * np.log(1.0 - labels_hat)) * 1.0 / m
if Lambda != 0:
for l in range(self.n_L + 1):
error += Lambda / 2 * np.sum(self.Theta[l][1:] ** 2) / m
print str(epoch) + " \t " + str(np.nan_to_num(error))
eV.append(error)
if error <= stoppingCriteria:
break
else:
grad, delta = self.backward_pass(delta, nodes, data, \
labels, grad, Lambda, quadLoss)
for l in range(self.n_L + 1):
self.Theta[l] -= LearningRate / m * grad[l]
if moreDetail:
return eV, nodes, grad, delta
return eV
def predictNNET(self, data, labels):
nodes = []
m = np.shape(labels)[0]
for l in range(self.n_L + 1):
nodes.append(np.zeros((m, self.nodes[l])))
nodes = self.forward_pass(nodes, data, labels)
labels_hat = nodes[-1] > 0.5
return labels_hat
# Main Code starts here
inData = np.array([[0.1, 0.5]])
outData = np.array([[0.5, 0.1]])
Q2 = nnet(noOfInputs = 2, noOfLayers = 1, nodesInEachLayer = [2], noOfOutputs = 2)
Q2.setTheta(allSet = True)
original_theta = Q2.getTheta()
loss, nodes, grad, delta = Q2.trainNNET(inData, outData, LearningRate = 0.1, \
noOfIterations = 1, moreDetail = True)
updated_Theta = Q2.getTheta()
#class1 = 1
#class0 = 0
#
#labelsTrain, dataTrain = dataExtraction('train', class1, class0)
#
#noOfIter = 5000
#learningRate = 1e-1
#stopVal = 1e-3
#Lambda = 1e-1
#pR = [-1, 1]
#
#mnistClassifier = nnet(noOfInputs = 784, noOfLayers = 2, nodesInEachLayer = [50, 50], \
# noOfOutputs = 1, parametersRange = pR)
#
#tic()
#loss = mnistClassifier.trainNNET(dataTrain, labelsTrain, noOfIterations = 100, \
# LearningRate = learningRate, Lambda = Lambda, \
# quadLoss = False)
#
#toc()
#
#plt.figure()
#plt.plot(loss)
#plt.show()
#
#print "\n\n\n"
#
#labels_hatTrain = mnistClassifier.predictNNET(dataTrain, labelsTrain)
#Train_Accuracy = np.sum(labels_hatTrain == labelsTrain) * 100.0 / np.shape(labelsTrain)[0]
#print "Training Accuracy = " + str(Train_Accuracy) + "%"
#
#labelsTest, dataTest = dataExtraction('test', class1, class0)
#
#labels_hatTest = mnistClassifier.predictNNET(dataTest, labelsTest)
#Test_Accuracy = np.sum(labels_hatTest == labelsTest) * 100.0 / np.shape(labelsTest)[0]
#print "Test Accuracy = " + str(Test_Accuracy) + "%"
| bsd-2-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/ticker.py | 4 | 63240 | """
Tick locating and formatting
============================
This module contains classes to support completely configurable tick locating
and formatting. Although the locators know nothing about major or minor
ticks, they are used by the Axis class to support major and minor tick
locating and formatting. Generic tick locators and formatters are provided,
as well as domain specific custom ones.
Default Formatter
-----------------
The default formatter identifies when the x-data being
plotted is a small range on top of a large offset. To
reduce the chances that the ticklabels overlap, the ticks
are labeled as deltas from a fixed offset. For example::
ax.plot(np.arange(2000, 2010), range(10))
will have ticks of 0-9 with an offset of +2e3. If this
is not desired, turn off the use of the offset on the default
formatter::
ax.get_xaxis().get_major_formatter().set_useOffset(False)
set the rcParam ``axes.formatter.useoffset=False`` to turn it off
globally, or set a different formatter.
Tick locating
-------------
The Locator class is the base class for all tick locators. The locators
handle autoscaling of the view limits based on the data limits, and the
choosing of tick locations. A useful semi-automatic tick locator is
MultipleLocator. You initialize this with a base, e.g., 10, and it picks axis
limits and ticks that are multiples of your base.
The Locator subclasses defined here are
:class:`NullLocator`
No ticks
:class:`FixedLocator`
Tick locations are fixed
:class:`IndexLocator`
locator for index plots (e.g., where x = range(len(y)))
:class:`LinearLocator`
evenly spaced ticks from min to max
:class:`LogLocator`
logarithmically ticks from min to max
:class:`SymmetricalLogLocator`
    locator for use with the symlog norm; works like the `LogLocator` for
    the part outside of the threshold and adds 0 if inside the limits
:class:`MultipleLocator`
ticks and range are a multiple of base;
either integer or float
:class:`OldAutoLocator`
    choose a MultipleLocator and dynamically reassign it for
intelligent ticking during navigation
:class:`MaxNLocator`
finds up to a max number of ticks at nice locations
:class:`AutoLocator`
:class:`MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
:class:`AutoMinorLocator`
locator for minor ticks when the axis is linear and the
major ticks are uniformly spaced. It subdivides the major
tick interval into a specified number of minor intervals,
defaulting to 4 or 5 depending on the major interval.
There are a number of locators specialized for date locations - see
the dates module
You can define your own locator by deriving from Locator. You must
override the __call__ method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
If you want to override the default locator, use one of the above or a
custom locator and pass it to the x or y axis instance. The relevant
methods are::
ax.xaxis.set_major_locator( xmajorLocator )
ax.xaxis.set_minor_locator( xminorLocator )
ax.yaxis.set_major_locator( ymajorLocator )
ax.yaxis.set_minor_locator( yminorLocator )
The default minor locator is the NullLocator, e.g., no minor ticks on by
default.
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The
formatter operates on a single tick value and returns a string to the
axis.
:class:`NullFormatter`
no labels on the ticks
:class:`IndexFormatter`
set the strings from a list of labels
:class:`FixedFormatter`
set the strings manually for the labels
:class:`FuncFormatter`
user defined function sets the labels
:class:`StrMethodFormatter`
Use string `format` method
:class:`FormatStrFormatter`
use a sprintf format string
:class:`ScalarFormatter`
default formatter for scalars; autopick the fmt string
:class:`LogFormatter`
formatter for log axes
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has access
to the axis view and data limits.
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter( xmajorFormatter )
ax.xaxis.set_minor_formatter( xminorFormatter )
ax.yaxis.set_major_formatter( ymajorFormatter )
ax.yaxis.set_minor_formatter( yminorFormatter )
See :ref:`pylab_examples-major_minor_demo1` for an example of setting
major and minor ticks. See the :mod:`matplotlib.dates` module for
more information and examples of using date locators and formatters.
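For example, a custom :class:`FuncFormatter` can be attached to label ticks
as percentages (a minimal sketch, assuming an existing Axes instance ``ax``)::
    from matplotlib.ticker import FuncFormatter
    percent_fmt = FuncFormatter(lambda x, pos: '{:.0f}%'.format(100 * x))
    ax.yaxis.set_major_formatter(percent_fmt)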
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import decimal
import locale
import math
import numpy as np
from matplotlib import rcParams
from matplotlib import cbook
from matplotlib import transforms as mtransforms
import warnings
if six.PY3:
long = int
class _DummyAxis(object):
def __init__(self, minpos=0):
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self._minpos = minpos
def get_view_interval(self):
return self.viewLim.intervalx
def set_view_interval(self, vmin, vmax):
self.viewLim.intervalx = vmin, vmax
def get_minpos(self):
return self._minpos
def get_data_interval(self):
return self.dataLim.intervalx
def set_data_interval(self, vmin, vmax):
self.dataLim.intervalx = vmin, vmax
class TickHelper(object):
axis = None
def set_axis(self, axis):
self.axis = axis
def create_dummy_axis(self, **kwargs):
if self.axis is None:
self.axis = _DummyAxis(**kwargs)
def set_view_interval(self, vmin, vmax):
self.axis.set_view_interval(vmin, vmax)
def set_data_interval(self, vmin, vmax):
self.axis.set_data_interval(vmin, vmax)
def set_bounds(self, vmin, vmax):
self.set_view_interval(vmin, vmax)
self.set_data_interval(vmin, vmax)
class Formatter(TickHelper):
"""
Convert the tick location to a string
"""
# some classes want to see all the locs to help format
# individual ones
locs = []
def __call__(self, x, pos=None):
"""Return the format for tick val x at position pos; pos=None
        indicates unspecified"""
raise NotImplementedError('Derived must override')
def format_data(self, value):
return self.__call__(value)
def format_data_short(self, value):
"""return a short string version"""
return self.format_data(value)
def get_offset(self):
return ''
def set_locs(self, locs):
self.locs = locs
def fix_minus(self, s):
"""
Some classes may want to replace a hyphen for minus with the
proper unicode symbol (U+2212) for typographical correctness.
The default is to not replace it.
Note, if you use this method, e.g., in :meth:`format_data` or
call, you probably don't want to use it for
:meth:`format_data_short` since the toolbar uses this for
interactive coord reporting and I doubt we can expect GUIs
across platforms will handle the unicode correctly. So for
now the classes that override :meth:`fix_minus` should have an
explicit :meth:`format_data_short` method
"""
return s
class IndexFormatter(Formatter):
"""
format the position x to the nearest i-th label where i=int(x+0.5)
"""
def __init__(self, labels):
self.labels = labels
self.n = len(labels)
def __call__(self, x, pos=None):
"""Return the format for tick val x at position pos; pos=None
        indicates unspecified"""
i = int(x + 0.5)
if i < 0:
return ''
elif i >= self.n:
return ''
else:
return self.labels[i]
class NullFormatter(Formatter):
'Always return the empty string'
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return ''
class FixedFormatter(Formatter):
'Return fixed strings for tick labels'
def __init__(self, seq):
"""
*seq* is a sequence of strings. For positions ``i < len(seq)`` return
*seq[i]* regardless of *x*. Otherwise return ''
"""
self.seq = seq
self.offset_string = ''
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if pos is None or pos >= len(self.seq):
return ''
else:
return self.seq[pos]
def get_offset(self):
return self.offset_string
def set_offset_string(self, ofs):
self.offset_string = ofs
class FuncFormatter(Formatter):
"""
User defined function for formatting
The function should take in two inputs (tick value *x* and position *pos*)
and return a string
"""
def __init__(self, func):
self.func = func
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.func(x, pos)
class FormatStrFormatter(Formatter):
"""
Use an old-style ('%' operator) format string to format the tick
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt % x
class StrMethodFormatter(Formatter):
"""
Use a new-style format string (as used by `str.format()`)
to format the tick. The field formatting must be labeled `x`.
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt.format(x=x)
class OldScalarFormatter(Formatter):
"""
Tick location is a plain old number.
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
xmin, xmax = self.axis.get_view_interval()
d = abs(xmax - xmin)
return self.pprint_val(x, d)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x) < 1e4 and x == int(x):
return '%d' % x
if d < 1e-2:
fmt = '%1.3e'
elif d < 1e-1:
fmt = '%1.3f'
elif d > 1e5:
fmt = '%1.1e'
elif d > 10:
fmt = '%1.1f'
elif d > 1:
fmt = '%1.2f'
else:
fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' % (mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class ScalarFormatter(Formatter):
"""
Tick location is a plain old number. If useOffset==True and the data range
is much smaller than the data average, then an offset will be determined
such that the tick labels are meaningful. Scientific notation is used for
data < 10^-n or data >= 10^m, where n and m are the power limits set using
set_powerlimits((n,m)). The defaults for these are controlled by the
axes.formatter.limits rc parameter.
"""
def __init__(self, useOffset=None, useMathText=None, useLocale=None):
# useOffset allows plotting small data ranges with large offsets: for
# example: [1+1e-9,1+2e-9,1+3e-9] useMathText will render the offset
# and scientific notation in mathtext
if useOffset is None:
useOffset = rcParams['axes.formatter.useoffset']
self.set_useOffset(useOffset)
self._usetex = rcParams['text.usetex']
if useMathText is None:
useMathText = rcParams['axes.formatter.use_mathtext']
self._useMathText = useMathText
self.orderOfMagnitude = 0
self.format = ''
self._scientific = True
self._powerlimits = rcParams['axes.formatter.limits']
if useLocale is None:
useLocale = rcParams['axes.formatter.use_locale']
self._useLocale = useLocale
def get_useOffset(self):
return self._useOffset
def set_useOffset(self, val):
if val in [True, False]:
self.offset = 0
self._useOffset = val
else:
self._useOffset = False
self.offset = val
useOffset = property(fget=get_useOffset, fset=set_useOffset)
def get_useLocale(self):
return self._useLocale
def set_useLocale(self, val):
if val is None:
self._useLocale = rcParams['axes.formatter.use_locale']
else:
self._useLocale = val
useLocale = property(fget=get_useLocale, fset=set_useLocale)
def fix_minus(self, s):
"""use a unicode minus rather than hyphen"""
if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']:
return s
else:
return s.replace('-', '\u2212')
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if len(self.locs) == 0:
return ''
else:
s = self.pprint_val(x)
return self.fix_minus(s)
def set_scientific(self, b):
'''True or False to turn scientific notation on or off
see also :meth:`set_powerlimits`
'''
self._scientific = bool(b)
def set_powerlimits(self, lims):
'''
Sets size thresholds for scientific notation.
e.g., ``formatter.set_powerlimits((-3, 4))`` sets the pre-2007 default
in which scientific notation is used for numbers less than 1e-3 or
greater than 1e4.
See also :meth:`set_scientific`.
'''
if len(lims) != 2:
raise ValueError("'lims' must be a sequence of length 2")
self._powerlimits = lims
def format_data_short(self, value):
"""return a short formatted string representation of a number"""
if self._useLocale:
return locale.format_string('%-12g', (value,))
else:
return '%-12g' % value
def format_data(self, value):
'return a formatted string representation of a number'
if self._useLocale:
s = locale.format_string('%1.10e', (value,))
else:
s = '%1.10e' % value
s = self._formatSciNotation(s)
return self.fix_minus(s)
def get_offset(self):
"""Return scientific notation, plus offset"""
if len(self.locs) == 0:
return ''
s = ''
if self.orderOfMagnitude or self.offset:
offsetStr = ''
sciNotStr = ''
if self.offset:
offsetStr = self.format_data(self.offset)
if self.offset > 0:
offsetStr = '+' + offsetStr
if self.orderOfMagnitude:
if self._usetex or self._useMathText:
sciNotStr = self.format_data(10 ** self.orderOfMagnitude)
else:
sciNotStr = '1e%d' % self.orderOfMagnitude
if self._useMathText:
if sciNotStr != '':
sciNotStr = r'\times\mathdefault{%s}' % sciNotStr
s = ''.join(('$', sciNotStr,
r'\mathdefault{', offsetStr, '}$'))
elif self._usetex:
if sciNotStr != '':
sciNotStr = r'\times%s' % sciNotStr
s = ''.join(('$', sciNotStr, offsetStr, '$'))
else:
s = ''.join((sciNotStr, offsetStr))
return self.fix_minus(s)
def set_locs(self, locs):
'set the locations of the ticks'
self.locs = locs
if len(self.locs) > 0:
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
if self._useOffset:
self._set_offset(d)
self._set_orderOfMagnitude(d)
self._set_format(vmin, vmax)
def _set_offset(self, range):
# offset of 20,001 is 20,000, for example
locs = self.locs
if locs is None or not len(locs) or range == 0:
self.offset = 0
return
ave_loc = np.mean(locs)
if ave_loc: # dont want to take log10(0)
ave_oom = math.floor(math.log10(np.mean(np.absolute(locs))))
range_oom = math.floor(math.log10(range))
if np.absolute(ave_oom - range_oom) >= 3: # four sig-figs
p10 = 10 ** range_oom
if ave_loc < 0:
self.offset = (np.ceil(np.max(locs) / p10) * p10)
else:
self.offset = (np.floor(np.min(locs) / p10) * p10)
else:
self.offset = 0
def _set_orderOfMagnitude(self, range):
# if scientific notation is to be used, find the appropriate exponent
# if using an numerical offset, find the exponent after applying the
# offset
if not self._scientific:
self.orderOfMagnitude = 0
return
locs = np.absolute(self.locs)
if self.offset:
oom = math.floor(math.log10(range))
else:
if locs[0] > locs[-1]:
val = locs[0]
else:
val = locs[-1]
if val == 0:
oom = 0
else:
oom = math.floor(math.log10(val))
if oom <= self._powerlimits[0]:
self.orderOfMagnitude = oom
elif oom >= self._powerlimits[1]:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def _set_format(self, vmin, vmax):
# set the format string to format all the ticklabels
if len(self.locs) < 2:
# Temporarily augment the locations with the axis end points.
_locs = list(self.locs) + [vmin, vmax]
else:
_locs = self.locs
locs = (np.asarray(_locs) - self.offset) / 10. ** self.orderOfMagnitude
loc_range = np.ptp(locs)
# Curvilinear coordinates can yield two identical points.
if loc_range == 0:
loc_range = np.max(np.abs(locs))
# Both points might be zero.
if loc_range == 0:
loc_range = 1
if len(self.locs) < 2:
# We needed the end points only for the loc_range calculation.
locs = locs[:-2]
loc_range_oom = int(math.floor(math.log10(loc_range)))
# first estimate:
sigfigs = max(0, 3 - loc_range_oom)
# refined estimate:
thresh = 1e-3 * 10 ** loc_range_oom
while sigfigs >= 0:
if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
sigfigs -= 1
else:
break
sigfigs += 1
self.format = '%1.' + str(sigfigs) + 'f'
if self._usetex:
self.format = '$%s$' % self.format
elif self._useMathText:
self.format = '$\mathdefault{%s}$' % self.format
def pprint_val(self, x):
xp = (x - self.offset) / (10. ** self.orderOfMagnitude)
if np.absolute(xp) < 1e-8:
xp = 0
if self._useLocale:
return locale.format_string(self.format, (xp,))
else:
return self.format % xp
def _formatSciNotation(self, s):
# transform 1e+004 into 1e4, for example
if self._useLocale:
decimal_point = locale.localeconv()['decimal_point']
positive_sign = locale.localeconv()['positive_sign']
else:
decimal_point = '.'
positive_sign = '+'
tup = s.split('e')
try:
significand = tup[0].rstrip('0').rstrip(decimal_point)
sign = tup[1][0].replace(positive_sign, '')
exponent = tup[1][1:].lstrip('0')
if self._useMathText or self._usetex:
if significand == '1' and exponent != '':
# reformat 1x10^y as 10^y
significand = ''
if exponent:
exponent = '10^{%s%s}' % (sign, exponent)
if significand and exponent:
return r'%s{\times}%s' % (significand, exponent)
else:
return r'%s%s' % (significand, exponent)
else:
s = ('%se%s%s' % (significand, sign, exponent)).rstrip('e')
return s
except IndexError:
return s
class LogFormatter(Formatter):
"""
Format values for log axis;
"""
def __init__(self, base=10.0, labelOnlyBase=True):
"""
*base* is used to locate the decade tick,
which will be the only one to be labeled if *labelOnlyBase*
is ``False``
"""
self._base = base + 0.0
self.labelOnlyBase = labelOnlyBase
def base(self, base):
"""change the *base* for labeling - warning: should always match the
base used for :class:`LogLocator`"""
self._base = base
def label_minor(self, labelOnlyBase):
'switch on/off minor ticks labeling'
self.labelOnlyBase = labelOnlyBase
def __call__(self, x, pos=None):
"""Return the format for tick val *x* at position *pos*"""
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
b = self._base
if x == 0.0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x)) / math.log(b)
isDecade = is_close_to_int(fx)
if not isDecade and self.labelOnlyBase:
s = ''
elif x > 10000:
s = '%1.0e' % x
elif x < 1:
s = '%1.0e' % x
else:
s = self.pprint_val(x, d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
def format_data(self, value):
b = self.labelOnlyBase
self.labelOnlyBase = False
value = cbook.strip_math(self.__call__(value))
self.labelOnlyBase = b
return value
def format_data_short(self, value):
'return a short formatted string representation of a number'
return '%-12g' % value
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x) < 1e4 and x == int(x):
return '%d' % x
if d < 1e-2:
fmt = '%1.3e'
elif d < 1e-1:
fmt = '%1.3f'
elif d > 1e5:
fmt = '%1.1e'
elif d > 10:
fmt = '%1.1f'
elif d > 1:
fmt = '%1.2f'
else:
fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' % (mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class LogFormatterExponent(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
"""Return the format for tick val *x* at position *pos*"""
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
d = abs(vmax - vmin)
b = self._base
if x == 0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x)) / math.log(b)
isDecade = is_close_to_int(fx)
if not isDecade and self.labelOnlyBase:
s = ''
elif abs(fx) > 10000:
s = '%1.0g' % fx
elif abs(fx) < 1:
s = '%1.0g' % fx
else:
s = self.pprint_val(fx, d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
class LogFormatterMathtext(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
b = self._base
usetex = rcParams['text.usetex']
# only label the decades
if x == 0:
if usetex:
return '$0$'
else:
return '$\mathdefault{0}$'
fx = math.log(abs(x)) / math.log(b)
is_decade = is_close_to_int(fx)
sign_string = '-' if x < 0 else ''
# use string formatting of the base if it is not an integer
if b % 1 == 0.0:
base = '%d' % b
else:
base = '%s' % b
if not is_decade and self.labelOnlyBase:
return ''
elif not is_decade:
if usetex:
return (r'$%s%s^{%.2f}$') % \
(sign_string, base, fx)
else:
return ('$\mathdefault{%s%s^{%.2f}}$') % \
(sign_string, base, fx)
else:
if usetex:
return (r'$%s%s^{%d}$') % (sign_string,
base,
nearest_long(fx))
else:
return (r'$\mathdefault{%s%s^{%d}}$') % (sign_string,
base,
nearest_long(fx))
class LogitFormatter(Formatter):
'''Probability formatter (using Math text)'''
def __call__(self, x, pos=None):
s = ''
if 0.01 <= x <= 0.99:
s = '{:.2f}'.format(x)
elif x < 0.01:
if is_decade(x):
s = '$10^{{{:.0f}}}$'.format(np.log10(x))
else:
s = '${:.5f}$'.format(x)
else: # x > 0.99
if is_decade(1-x):
s = '$1-10^{{{:.0f}}}$'.format(np.log10(1-x))
else:
s = '$1-{:.5f}$'.format(1-x)
return s
def format_data_short(self, value):
'return a short formatted string representation of a number'
return '%-12g' % value
class EngFormatter(Formatter):
"""
Formats axis values using engineering prefixes to represent powers of 1000,
plus a specified unit, e.g., 10 MHz instead of 1e7.
"""
# the unicode for -6 is the greek letter mu
    # commented here due to bug in pep8
# (https://github.com/jcrocholl/pep8/issues/271)
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "\u03bc",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, unit="", places=None):
self.unit = unit
self.places = places
def __call__(self, x, pos=None):
s = "%s%s" % (self.format_eng(x), self.unit)
return self.fix_minus(s)
def format_eng(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.places = 0
'0'
>>> format_eng(1000000) # for self.places = 1
'1.0 M'
>>> format_eng("-1e-6") # for self.places = 2
u'-1.00 \u03bc'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
dnum = decimal.Decimal(str(num))
sign = 1
if dnum < 0:
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
prefix = self.ENG_PREFIXES[int(pow10)]
mant = sign * dnum / (10 ** pow10)
if self.places is None:
format_str = "%g %s"
elif self.places == 0:
format_str = "%i %s"
elif self.places > 0:
format_str = ("%%.%if %%s" % self.places)
formatted = format_str % (mant, prefix)
return formatted.strip()
class Locator(TickHelper):
"""
Determine the tick locations;
Note, you should not use the same locator between different
:class:`~matplotlib.axis.Axis` because the locator stores references to
the Axis data and view limits
"""
# Some automatic tick locators can generate so many ticks they
# kill the machine when you try and render them.
# This parameter is set to cause locators to raise an error if too
# many ticks are generated.
MAXTICKS = 1000
def tick_values(self, vmin, vmax):
"""
Return the values of the located ticks given **vmin** and **vmax**.
.. note::
To get tick locations with the vmin and vmax values defined
automatically for the associated :attr:`axis` simply call
the Locator instance::
>>> print((type(loc)))
<type 'Locator'>
>>> print((loc()))
[1, 2, 3, 4]
"""
raise NotImplementedError('Derived must override')
def set_params(self, **kwargs):
"""
        Do nothing, and raise a warning. Any locator class not supporting the
set_params() function will call this.
"""
warnings.warn("'set_params()' not defined for locator of type " +
str(type(self)))
def __call__(self):
"""Return the locations of the ticks"""
# note: some locators return data limits, other return view limits,
# hence there is no *one* interface to call self.tick_values.
raise NotImplementedError('Derived must override')
def raise_if_exceeds(self, locs):
"""raise a RuntimeError if Locator attempts to create more than
MAXTICKS locs"""
if len(locs) >= self.MAXTICKS:
msg = ('Locator attempting to generate %d ticks from %s to %s: ' +
'exceeds Locator.MAXTICKS') % (len(locs), locs[0], locs[-1])
raise RuntimeError(msg)
return locs
def view_limits(self, vmin, vmax):
"""
select a scale for the range from vmin to vmax
Normally this method is overridden by subclasses to
change locator behaviour.
"""
return mtransforms.nonsingular(vmin, vmax)
def autoscale(self):
"""autoscale the view limits"""
return self.view_limits(*self.axis.get_view_interval())
def pan(self, numsteps):
"""Pan the view interval by *numsteps* ticks (positive or negative)"""
ticks = self()
numticks = len(ticks)
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if numticks > 2:
step = numsteps * abs(ticks[0] - ticks[1])
else:
d = abs(vmax - vmin)
step = numsteps * d / 6.
vmin += step
vmax += step
self.axis.set_view_interval(vmin, vmax, ignore=True)
def zoom(self, direction):
"Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
interval = abs(vmax - vmin)
step = 0.1 * interval * direction
self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)
def refresh(self):
"""refresh internal information based on current lim"""
pass
class IndexLocator(Locator):
"""
Place a tick on every multiple of some base number of points
plotted, e.g., on every 5th point. It is assumed that you are doing
index plotting; i.e., the axis is 0, len(data). This is mainly
useful for x ticks.
"""
def __init__(self, base, offset):
'place ticks on the i-th data points where (i-offset)%base==0'
self._base = base
self.offset = offset
def set_params(self, base=None, offset=None):
"""Set parameters within this locator"""
if base is not None:
self._base = base
if offset is not None:
self.offset = offset
def __call__(self):
"""Return the locations of the ticks"""
dmin, dmax = self.axis.get_data_interval()
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
return self.raise_if_exceeds(
np.arange(vmin + self.offset, vmax + 1, self._base))
class FixedLocator(Locator):
"""
Tick locations are fixed. If nbins is not None,
the array of possible positions will be subsampled to
keep the number of ticks <= nbins +1.
The subsampling will be done so as to include the smallest
absolute value; for example, if zero is included in the
array of possibilities, then it is guaranteed to be one of
the chosen ticks.
"""
def __init__(self, locs, nbins=None):
self.locs = np.asarray(locs)
self.nbins = nbins
if self.nbins is not None:
self.nbins = max(self.nbins, 2)
def set_params(self, nbins=None):
"""Set parameters within this locator."""
if nbins is not None:
self.nbins = nbins
def __call__(self):
return self.tick_values(None, None)
def tick_values(self, vmin, vmax):
"""
Return the locations of the ticks.
.. note::
Because the values are fixed, vmin and vmax are not used in this
method.
"""
if self.nbins is None:
return self.locs
step = max(int(0.99 + len(self.locs) / float(self.nbins)), 1)
ticks = self.locs[::step]
for i in range(1, step):
ticks1 = self.locs[i::step]
if np.absolute(ticks1).min() < np.absolute(ticks).min():
ticks = ticks1
return self.raise_if_exceeds(ticks)
class NullLocator(Locator):
"""
No ticks
"""
def __call__(self):
return self.tick_values(None, None)
def tick_values(self, vmin, vmax):
"""
Return the locations of the ticks.
.. note::
Because the values are Null, vmin and vmax are not used in this
method.
"""
return []
class LinearLocator(Locator):
"""
Determine the tick locations
The first time this function is called it will try to set the
number of ticks to make a nice tick partitioning. Thereafter the
number of ticks will be fixed so that interactive navigation will
be nice
"""
def __init__(self, numticks=None, presets=None):
"""
Use presets to set locs based on (vmin, vmax). *presets* is a dict mapping (vmin, vmax) -> locs.
"""
self.numticks = numticks
if presets is None:
self.presets = {}
else:
self.presets = presets
def set_params(self, numticks=None, presets=None):
"""Set parameters within this locator."""
if presets is not None:
self.presets = presets
if numticks is not None:
self.numticks = numticks
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if vmax < vmin:
vmin, vmax = vmax, vmin
if (vmin, vmax) in self.presets:
return self.presets[(vmin, vmax)]
if self.numticks is None:
self._set_numticks()
if self.numticks == 0:
return []
ticklocs = np.linspace(vmin, vmax, self.numticks)
return self.raise_if_exceeds(ticklocs)
def _set_numticks(self):
self.numticks = 11 # todo; be smart here; this is just for dev
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax < vmin:
vmin, vmax = vmax, vmin
if vmin == vmax:
vmin -= 1
vmax += 1
exponent, remainder = divmod(math.log10(vmax - vmin), 1)
if remainder < 0.5:
exponent -= 1
scale = 10 ** (-exponent)
vmin = math.floor(scale * vmin) / scale
vmax = math.ceil(scale * vmax) / scale
return mtransforms.nonsingular(vmin, vmax)
def closeto(x, y):
if abs(x - y) < 1e-10:
return True
else:
return False
class Base(object):
'this solution has some hacks to deal with floating point inaccuracies'
def __init__(self, base):
if base <= 0:
raise ValueError("'base' must be positive")
self._base = base
def lt(self, x):
'return the largest multiple of base < x'
d, m = divmod(x, self._base)
if closeto(m, 0) and not closeto(m / self._base, 1):
return (d - 1) * self._base
return d * self._base
def le(self, x):
'return the largest multiple of base <= x'
d, m = divmod(x, self._base)
if closeto(m / self._base, 1): # was closeto(m, self._base)
#looks like floating point error
return (d + 1) * self._base
return d * self._base
def gt(self, x):
'return the smallest multiple of base > x'
d, m = divmod(x, self._base)
if closeto(m / self._base, 1):
#looks like floating point error
return (d + 2) * self._base
return (d + 1) * self._base
def ge(self, x):
'return the smallest multiple of base >= x'
d, m = divmod(x, self._base)
if closeto(m, 0) and not closeto(m / self._base, 1):
return d * self._base
return (d + 1) * self._base
def get_base(self):
return self._base
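# --- Illustrative sketch (not part of the original module): Base snaps values
# --- to multiples of its step while tolerating floating point error.
def _demo_base():
    b = Base(0.5)
    # largest multiple of 0.5 <= 1.3 is 1.0; smallest multiple >= 1.3 is 1.5
    return b.le(1.3), b.ge(1.3)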
class MultipleLocator(Locator):
"""
Set a tick on every integer that is multiple of base in the
view interval
"""
def __init__(self, base=1.0):
self._base = Base(base)
def set_params(self, base):
"""Set parameters within this locator."""
if base is not None:
self._base = base
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
if vmax < vmin:
vmin, vmax = vmax, vmin
vmin = self._base.ge(vmin)
base = self._base.get_base()
n = (vmax - vmin + 0.001 * base) // base
locs = vmin - base + np.arange(n + 3) * base
return self.raise_if_exceeds(locs)
def view_limits(self, dmin, dmax):
"""
Set the view limits to the nearest multiples of base that
contain the data
"""
vmin = self._base.le(dmin)
vmax = self._base.ge(dmax)
if vmin == vmax:
vmin -= 1
vmax += 1
return mtransforms.nonsingular(vmin, vmax)
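# --- Illustrative sketch (not part of the original module): MultipleLocator
# --- can be exercised without an attached Axis through tick_values().
def _demo_multiple_locator():
    loc = MultipleLocator(0.5)
    # multiples of 0.5 covering [0, 2], padded by one extra step on each side
    # (the Axis later drops ticks that fall outside the view interval):
    # [-0.5, 0., 0.5, 1., 1.5, 2., 2.5]
    return loc.tick_values(0, 2)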
def scale_range(vmin, vmax, n=1, threshold=100):
dv = abs(vmax - vmin)
if dv == 0: # maxabsv == 0 is a special case of this.
return 1.0, 0.0
# Note: this should never occur because
# vmin, vmax should have been checked by nonsingular(),
# and spread apart if necessary.
meanv = 0.5 * (vmax + vmin)
if abs(meanv) / dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(math.log10(meanv), 1)[0]
offset = 10 ** ex
else:
ex = divmod(math.log10(-meanv), 1)[0]
offset = -10 ** ex
ex = divmod(math.log10(dv / n), 1)[0]
scale = 10 ** ex
return scale, offset
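# --- Illustrative sketch (not part of the original module): what scale_range
# --- reports for a plain interval and for a narrow interval far from zero.
def _demo_scale_range():
    # scale_range(0, 10) -> (10.0, 0): decade scale of 10, no offset
    # scale_range(100000, 100001) -> (1.0, 100000.0): the span is tiny
    # compared to its distance from zero, so an offset of 1e5 is used
    return scale_range(0, 10), scale_range(100000, 100001)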
class MaxNLocator(Locator):
"""
Select no more than N intervals at nice locations.
"""
default_params = dict(nbins=10,
steps=None,
trim=True,
integer=False,
symmetric=False,
prune=None)
def __init__(self, *args, **kwargs):
"""
Keyword args:
*nbins*
Maximum number of intervals; one less than max number of ticks.
*steps*
Sequence of nice numbers starting with 1 and ending with 10;
e.g., [1, 2, 4, 5, 10]
*integer*
If True, ticks will take only integer values.
*symmetric*
If True, autoscaling will result in a range symmetric
about zero.
*prune*
['lower' | 'upper' | 'both' | None]
Remove edge ticks -- useful for stacked or ganged plots
where the upper tick of one axes overlaps with the lower
tick of the axes above it.
If prune=='lower', the smallest tick will
be removed. If prune=='upper', the largest tick will be
removed. If prune=='both', the largest and smallest ticks
will be removed. If prune==None, no ticks will be removed.
"""
# I left "trim" out; it defaults to True, and it is not
# clear that there is any use case for False, so we may
# want to remove that kwarg. EF 2010/04/18
if args:
kwargs['nbins'] = args[0]
if len(args) > 1:
raise ValueError(
"Keywords are required for all arguments except 'nbins'")
self.set_params(**self.default_params)
self.set_params(**kwargs)
def set_params(self, **kwargs):
"""Set parameters within this locator."""
if 'nbins' in kwargs:
self._nbins = int(kwargs['nbins'])
if 'trim' in kwargs:
self._trim = kwargs['trim']
if 'integer' in kwargs:
self._integer = kwargs['integer']
if 'symmetric' in kwargs:
self._symmetric = kwargs['symmetric']
if 'prune' in kwargs:
prune = kwargs['prune']
if prune is not None and prune not in ['upper', 'lower', 'both']:
raise ValueError(
"prune must be 'upper', 'lower', 'both', or None")
self._prune = prune
if 'steps' in kwargs:
steps = kwargs['steps']
if steps is None:
self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
if 'integer' in kwargs:
self._integer = kwargs['integer']
if self._integer:
self._steps = [n for n in self._steps if divmod(n, 1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin = vmin - offset
vmax = vmax - offset
raw_step = (vmax - vmin) / nbins
scaled_raw_step = raw_step / scale
best_vmax = vmax
best_vmin = vmin
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step * divmod(vmin, step)[0]
best_vmax = best_vmin + step * nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins + 1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=1e-13,
tiny=1e-14)
locs = self.bin_boundaries(vmin, vmax)
prune = self._prune
if prune == 'lower':
locs = locs[1:]
elif prune == 'upper':
locs = locs[:-1]
elif prune == 'both':
locs = locs[1:-1]
return self.raise_if_exceeds(locs)
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
dmin, dmax = mtransforms.nonsingular(dmin, dmax, expander=1e-12,
tiny=1.e-13)
return np.take(self.bin_boundaries(dmin, dmax), [0, -1])
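# --- Illustrative sketch (not part of the original module): MaxNLocator with
# --- the default steps picks "nice" boundaries for the requested bin count.
def _demo_max_n_locator():
    loc = MaxNLocator(nbins=5)
    # for the interval [0, 10] this yields [0., 2., 4., 6., 8., 10.]
    return loc.tick_values(0, 10)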
def decade_down(x, base=10):
'floor x to the nearest lower decade'
if x == 0.0:
return -base
lx = np.floor(np.log(x) / np.log(base))
return base ** lx
def decade_up(x, base=10):
'ceil x to the nearest higher decade'
if x == 0.0:
return base
lx = np.ceil(np.log(x) / np.log(base))
return base ** lx
def nearest_long(x):
if x == 0:
return long(0)
elif x > 0:
return long(x + 0.5)
else:
return long(x - 0.5)
def is_decade(x, base=10):
if not np.isfinite(x):
return False
if x == 0.0:
return True
lx = np.log(np.abs(x)) / np.log(base)
return is_close_to_int(lx)
def is_close_to_int(x):
if not np.isfinite(x):
return False
return abs(x - nearest_long(x)) < 1e-10
class LogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, base=10.0, subs=[1.0], numdecs=4, numticks=15):
"""
place ticks on the location= base**i*subs[j]
"""
self.base(base)
self.subs(subs)
# this needs to be validated > 1 with traitlets
self.numticks = numticks
self.numdecs = numdecs
def set_params(self, base=None, subs=None, numdecs=None, numticks=None):
"""Set parameters within this locator."""
# call the base()/subs() setters so the internal _base/_subs used by
# tick_values() are actually updated
if base is not None:
self.base(base)
if subs is not None:
self.subs(subs)
if numdecs is not None:
self.numdecs = numdecs
if numticks is not None:
self.numticks = numticks
def base(self, base):
"""
set the base of the log scaling (major tick every base**i, i integer)
"""
self._base = base + 0.0
def subs(self, subs):
"""
set the minor ticks the log scaling every base**i*subs[j]
"""
if subs is None:
self._subs = None # autosub
else:
self._subs = np.asarray(subs) + 0.0
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._base
# dummy axis has no axes attribute
if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
decades = np.arange(vmax - self.numdecs, vmax)
ticklocs = b ** decades
return ticklocs
if vmin <= 0.0:
if self.axis is not None:
vmin = self.axis.get_minpos()
if vmin <= 0.0 or not np.isfinite(vmin):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
vmin = math.log(vmin) / math.log(b)
vmax = math.log(vmax) / math.log(b)
if vmax < vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax) - math.ceil(vmin)
if self._subs is None: # autosub
if numdec > 10:
subs = np.array([1.0])
elif numdec > 6:
subs = np.arange(2.0, b, 2.0)
else:
subs = np.arange(2.0, b)
else:
subs = self._subs
stride = 1
if not self.numticks > 1:
raise RuntimeError('The number of ticks must be greater than 1 '
'for LogLocator.')
while numdec / stride + 1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin) - stride,
math.ceil(vmax) + 2 * stride, stride)
if hasattr(self, '_transform'):
ticklocs = self._transform.inverted().transform(decades)
if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = np.ravel(np.outer(subs, ticklocs))
else:
if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = []
for decadeStart in b ** decades:
ticklocs.extend(subs * decadeStart)
else:
ticklocs = b ** decades
return self.raise_if_exceeds(np.asarray(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._base
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
vmin = b ** (vmax - self.numdecs)
return vmin, vmax
minpos = self.axis.get_minpos()
if minpos <= 0 or not np.isfinite(minpos):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
if vmin <= minpos:
vmin = minpos
if not is_decade(vmin, self._base):
vmin = decade_down(vmin, self._base)
if not is_decade(vmax, self._base):
vmax = decade_up(vmax, self._base)
if vmin == vmax:
vmin = decade_down(vmin, self._base)
vmax = decade_up(vmax, self._base)
result = mtransforms.nonsingular(vmin, vmax)
return result
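# --- Illustrative sketch (not part of the original module): LogLocator places
# --- major ticks at integer powers of the base. This assumes, as elsewhere in
# --- this file, that an unattached locator has ``axis = None``.
def _demo_log_locator():
    loc = LogLocator(base=10.0)
    # decades spanning [1, 1000] with one extra decade on either side:
    # [0.1, 1., 10., 100., 1000., 10000.]
    return loc.tick_values(1, 1000)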
class SymmetricalLogLocator(Locator):
"""
Determine the tick locations for symmetric log (symlog) axes
"""
def __init__(self, transform, subs=None):
"""
place ticks on the location= base**i*subs[j]
"""
self._transform = transform
if subs is None:
self._subs = [1.0]
else:
self._subs = subs
self.numticks = 15
def set_params(self, subs=None, numticks=None):
"""Set parameters within this locator."""
if numticks is not None:
self.numticks = numticks
if subs is not None:
self._subs = subs
def __call__(self):
'Return the locations of the ticks'
# Note, these are untransformed coordinates
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._transform.base
t = self._transform.linthresh
if vmax < vmin:
vmin, vmax = vmax, vmin
# The domain is divided into three sections, only some of
# which may actually be present.
#
# <======== -t ==0== t ========>
# aaaaaaaaa bbbbb ccccccccc
#
# a) and c) will have ticks at integral log positions. The
# number of ticks needs to be reduced if there are more
# than self.numticks of them.
#
# b) has a tick at 0 and only 0 (we assume t is a small
# number, and the linear segment is just an implementation
# detail and not interesting.)
#
# We could also add ticks at t, but that seems to usually be
# uninteresting.
#
# "simple" mode is when the range falls entirely within (-t,
# t) -- it should just display (vmin, 0, vmax)
has_a = has_b = has_c = False
if vmin < -t:
has_a = True
if vmax > -t:
has_b = True
if vmax > t:
has_c = True
elif vmin < 0:
if vmax > 0:
has_b = True
if vmax > t:
has_c = True
else:
return [vmin, vmax]
elif vmin < t:
if vmax > t:
has_b = True
has_c = True
else:
return [vmin, vmax]
else:
has_c = True
def get_log_range(lo, hi):
lo = np.floor(np.log(lo) / np.log(b))
hi = np.ceil(np.log(hi) / np.log(b))
return lo, hi
# First, calculate all the ranges, so we can determine striding
if has_a:
if has_b:
a_range = get_log_range(t, -vmin + 1)
else:
a_range = get_log_range(-vmax, -vmin + 1)
else:
a_range = (0, 0)
if has_c:
if has_b:
c_range = get_log_range(t, vmax + 1)
else:
c_range = get_log_range(vmin, vmax + 1)
else:
c_range = (0, 0)
total_ticks = (a_range[1] - a_range[0]) + (c_range[1] - c_range[0])
if has_b:
total_ticks += 1
stride = max(np.floor(float(total_ticks) / (self.numticks - 1)), 1)
decades = []
if has_a:
decades.extend(-1 * (b ** (np.arange(a_range[0], a_range[1],
stride)[::-1])))
if has_b:
decades.append(0.0)
if has_c:
decades.extend(b ** (np.arange(c_range[0], c_range[1], stride)))
# Add the subticks if requested
if self._subs is None:
subs = np.arange(2.0, b)
else:
subs = np.asarray(self._subs)
if len(subs) > 1 or subs[0] != 1.0:
ticklocs = []
for decade in decades:
ticklocs.extend(subs * decade)
else:
ticklocs = decades
return self.raise_if_exceeds(np.array(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._transform.base
if vmax < vmin:
vmin, vmax = vmax, vmin
if not is_decade(abs(vmin), b):
if vmin < 0:
vmin = -decade_up(-vmin, b)
else:
vmin = decade_down(vmin, b)
if not is_decade(abs(vmax), b):
if vmax < 0:
vmax = -decade_down(-vmax, b)
else:
vmax = decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -decade_up(-vmin, b)
vmax = -decade_down(-vmax, b)
else:
vmin = decade_down(vmin, b)
vmax = decade_up(vmax, b)
result = mtransforms.nonsingular(vmin, vmax)
return result
class LogitLocator(Locator):
"""
Determine the tick locations for logit axes
"""
def __init__(self, minor=False):
"""
place ticks on the logit locations
"""
self.minor = minor
def set_params(self, minor=None):
"""Set parameters within this locator."""
if minor is not None:
self.minor = minor
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
# dummy axis has no axes attribute
if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
raise NotImplementedError('Polar axis cannot be logit scaled yet')
# what to do if a window beyond ]0, 1[ is chosen
if vmin <= 0.0:
if self.axis is not None:
vmin = self.axis.get_minpos()
if (vmin <= 0.0) or (not np.isfinite(vmin)):
raise ValueError(
"Data has no values in ]0, 1[ and therefore can not be "
"logit-scaled.")
# NOTE: for vmax, we should query a property similar to get_minpos, but
# related to the maximal, less-than-one data point. Unfortunately,
# get_minpos is defined very deep in the BBox and updated with data,
# so for now we use the trick below.
if vmax >= 1.0:
if self.axis is not None:
vmax = 1 - self.axis.get_minpos()
if (vmax >= 1.0) or (not np.isfinite(vmax)):
raise ValueError(
"Data has no values in ]0, 1[ and therefore can not be "
"logit-scaled.")
if vmax < vmin:
vmin, vmax = vmax, vmin
vmin = np.log10(vmin / (1 - vmin))
vmax = np.log10(vmax / (1 - vmax))
decade_min = np.floor(vmin)
decade_max = np.ceil(vmax)
# major ticks
if not self.minor:
ticklocs = []
if (decade_min <= -1):
expo = np.arange(decade_min, min(0, decade_max + 1))
ticklocs.extend(list(10**expo))
if (decade_min <= 0) and (decade_max >= 0):
ticklocs.append(0.5)
if (decade_max >= 1):
expo = -np.arange(max(1, decade_min), decade_max + 1)
ticklocs.extend(list(1 - 10**expo))
# minor ticks
else:
ticklocs = []
if (decade_min <= -2):
expo = np.arange(decade_min, min(-1, decade_max))
newticks = np.outer(np.arange(2, 10), 10**expo).ravel()
ticklocs.extend(list(newticks))
if (decade_min <= 0) and (decade_max >= 0):
ticklocs.extend([0.2, 0.3, 0.4, 0.6, 0.7, 0.8])
if (decade_max >= 2):
expo = -np.arange(max(2, decade_min), decade_max + 1)
newticks = 1 - np.outer(np.arange(2, 10), 10**expo).ravel()
ticklocs.extend(list(newticks))
return self.raise_if_exceeds(np.array(ticklocs))
class AutoLocator(MaxNLocator):
def __init__(self):
MaxNLocator.__init__(self, nbins=9, steps=[1, 2, 5, 10])
class AutoMinorLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks. Assumes the scale is linear and major ticks are
evenly spaced.
"""
def __init__(self, n=None):
"""
*n* is the number of subdivisions of the interval between
major ticks; e.g., n=2 will place a single minor tick midway
between major ticks.
If *n* is omitted or None, it will be set to 5 or 4.
"""
self.ndivs = n
def __call__(self):
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
try:
majorstep = majorlocs[1] - majorlocs[0]
except IndexError:
# Need at least two major ticks to find minor tick locations
# TODO: Figure out a way to still be able to display minor
# ticks without two major ticks visible. For now, just display
# no ticks at all.
majorstep = 0
if self.ndivs is None:
if majorstep == 0:
# TODO: Need a better way to figure out ndivs
ndivs = 1
else:
x = int(round(10 ** (np.log10(majorstep) % 1)))
if x in [1, 5, 10]:
ndivs = 5
else:
ndivs = 4
else:
ndivs = self.ndivs
minorstep = majorstep / ndivs
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
if len(majorlocs) > 0:
t0 = majorlocs[0]
tmin = ((vmin - t0) // minorstep + 1) * minorstep
tmax = ((vmax - t0) // minorstep + 1) * minorstep
locs = np.arange(tmin, tmax, minorstep) + t0
cond = np.abs((locs - t0) % majorstep) > minorstep / 10.0
locs = locs.compress(cond)
else:
locs = []
return self.raise_if_exceeds(np.array(locs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
class OldAutoLocator(Locator):
"""
On autoscale this class picks the best MultipleLocator to set the
view limits and the tick locs.
"""
def __init__(self):
self._locator = LinearLocator()
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self.raise_if_exceeds(self._locator())
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
def refresh(self):
'refresh internal information based on current lim'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
d = abs(vmax - vmin)
self._locator = self.get_locator(d)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
d = abs(vmax - vmin)
self._locator = self.get_locator(d)
return self._locator.view_limits(vmin, vmax)
def get_locator(self, d):
'pick the best locator based on a distance'
d = abs(d)
if d <= 0:
locator = MultipleLocator(0.2)
else:
try:
ld = math.log10(d)
except OverflowError:
raise RuntimeError('AutoLocator illegal data interval range')
fld = math.floor(ld)
base = 10 ** fld
#if ld==fld: base = 10**(fld-1)
#else: base = 10**fld
if d >= 5 * base:
ticksize = base
elif d >= 2 * base:
ticksize = base / 2.0
else:
ticksize = base / 5.0
locator = MultipleLocator(ticksize)
return locator
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
'StrMethodFormatter', 'ScalarFormatter', 'LogFormatter',
'LogFormatterExponent', 'LogFormatterMathtext', 'Locator',
'IndexLocator', 'FixedLocator', 'NullLocator',
'LinearLocator', 'LogLocator', 'AutoLocator',
'MultipleLocator', 'MaxNLocator', 'AutoMinorLocator',
'SymmetricalLogLocator')
| mit |
mc-hammertimeseries/cs207project | procs/_corr.py | 1 | 2825 | import numpy.fft as nfft
import numpy as np
import timeseries as ts
from scipy.stats import norm
from .fft import fft
def tsmaker(m, s, j):
meta={}
meta['order'] = int(np.random.choice([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]))
meta['blarg'] = int(np.random.choice([1, 2]))
t = np.arange(0.0, 1.0, 0.01)
v = norm.pdf(t, m, s) + j*np.random.randn(100)
return meta, ts.TimeSeries(t, v)
def random_ts(a):
t = np.arange(0.0, 1.0, 0.01)
v = a*np.random.random(100)
return ts.TimeSeries(t, v)
def stand(x, m, s):
return (x-m)/s
def ccor(ts1, ts2):
"""
Given two standardized time series, compute their cross-correlation using FFT.
"""
v1 = ts1.values()
v2 = ts2.values()
N = len(v1)
out_arr = np.empty(N, dtype=complex)
fft.fft_ccor(np.array(v1, dtype=complex),
np.array(v2, dtype=complex),
out_arr)
# out_arr is in order [0, N-1, N-2, ..., 1], so reorder it.
return np.array(out_arr[[0] + list(range(len(out_arr)-1, 0, -1))], dtype=float)
def max_corr_at_phase(ts1, ts2):
ccorts = ccor(ts1, ts2)
idx = np.argmax(ccorts)
maxcorr = ccorts[idx]
return idx, maxcorr
#The equation for the kernelized cross correlation is given at
#http://www.cs.tufts.edu/~roni/PUB/ecml09-tskernels.pdf
#normalize the kernel there by np.sqrt(K(x,x)K(y,y)) so that the correlation
#of a time series with itself is 1.
def kernel_corr(ts1, ts2, mult=1):
"compute a kernelized correlation so that we can get a real distance"
def kernel(ts1,ts2):
v1 = ts1.values()
v2 = ts2.values()
s = 0
for i in range(len(v1)):
# print(np.dot(v1, np.concatenate((np.zeros(i),v2[i:]))))
s += np.exp(np.dot(v1, np.concatenate((v2[i:], v2[0:i]))))
return s
return kernel(ts1, ts2) / np.sqrt(kernel(ts1, ts1) * kernel(ts2, ts2))
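# Illustrative sketch (not part of the original module): the same normalized
# kernel written directly on equal-length 1-D numpy arrays instead of
# TimeSeries objects. Note that the `mult` argument of kernel_corr above is
# accepted but not applied in its body.
def _kernel_corr_arrays(v1, v2):
    def k(a, b):
        # sum of exp(<a, b rotated by i>) over all circular shifts i
        return sum(np.exp(np.dot(a, np.roll(b, -i))) for i in range(len(a)))
    return k(v1, v2) / np.sqrt(k(v1, v1) * k(v2, v2))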
#this is for a quick and dirty test of these functions
#you might need to add procs to pythonpath for this to work
if __name__ == "__main__":
print("HI")
_, t1 = tsmaker(0.5, 0.1, 0.01)
_, t2 = tsmaker(0.5, 0.1, 0.01)
print(t1.mean(), t1.std(), t2.mean(), t2.std())
import matplotlib.pyplot as plt
plt.plot(t1)
plt.plot(t2)
plt.show()
standts1 = stand(t1, t1.mean(), t1.std())
standts2 = stand(t2, t2.mean(), t2.std())
idx, mcorr = max_corr_at_phase(standts1, standts2)
print(idx, mcorr)
sumcorr = kernel_corr(standts1, standts2, mult=10)
print(sumcorr)
t3 = random_ts(2)
t4 = random_ts(3)
plt.plot(t3)
plt.plot(t4)
plt.show()
standts3 = stand(t3, t3.mean(), t3.std())
standts4 = stand(t4, t4.mean(), t4.std())
idx, mcorr = max_corr_at_phase(standts3, standts4)
print(idx, mcorr)
sumcorr = kernel_corr(standts3, standts4, mult=10)
print(sumcorr) | mit |
Myasuka/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead."
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
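# --- Illustrative sketch (not part of this module's public API): column-wise
# --- standardization with the scale() function defined above.
def _demo_scale():
    X = np.array([[1., 2.], [3., 4.]])
    # each column ends up with zero mean and unit variance:
    # [[-1., -1.], [ 1.,  1.]]
    return scale(X)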
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
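# --- Illustrative sketch (not part of this module's public API): the
# --- MinMaxScaler formula from the docstring in action.
def _demo_min_max_scaler():
    X = np.array([[1.], [2.], [3.]])
    # X_std = (X - 1) / (3 - 1) mapped onto (0, 1): [[0.], [0.5], [1.]]
    return MinMaxScaler(feature_range=(0, 1)).fit_transform(X)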
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
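# --- Illustrative sketch (not part of this module's public API): StandardScaler
# --- round-trips data through transform() and inverse_transform().
def _demo_standard_scaler():
    X = np.array([[1., 10.], [2., 20.], [3., 30.]])
    scaler = StandardScaler().fit(X)
    Xt = scaler.transform(X)             # zero mean, unit variance per column
    return scaler.inverse_transform(Xt)  # recovers X up to floating point error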
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
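# --- Illustrative sketch (not part of this module's public API): RobustScaler
# --- centers on the median and scales by the IQR, so a single outlier barely
# --- distorts the other samples.
def _demo_robust_scaler():
    X = np.array([[1.], [2.], [3.], [4.], [100.]])
    # median = 3, IQR = 4 - 2 = 2  ->  [[-1.], [-0.5], [0.], [0.5], [48.5]]
    return RobustScaler().fit_transform(X)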
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
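# --- Illustrative sketch (not part of this module's public API): l2
# --- normalization rescales each row to unit Euclidean length.
def _demo_normalize():
    X = np.array([[3., 4.], [0., 0.]])
    # [[0.6, 0.8], [0., 0.]] -- the all-zero row is left untouched
    return normalize(X, norm='l2')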
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
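# Minimal usage sketch for binarize() above (illustrative values): with the default
# threshold of 0.0, strictly positive entries map to 1 and everything else to 0.
#
#     >>> import numpy as np
#     >>> binarize(np.array([[1.5, -0.5], [0.0, 2.0]]))
#     array([[ 1.,  0.],
#            [ 0.,  1.]])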
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
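    # Illustrative consistency check (assumes a linear kernel, K = X.dot(X.T), so phi
    # is the identity map): centering K with this class should match computing the
    # kernel on explicitly mean-centred data.
    #
    #     >>> import numpy as np
    #     >>> X = np.array([[1., 2.], [3., 4.], [5., 0.]])
    #     >>> K = X.dot(X.T)
    #     >>> Kc = KernelCenterer().fit(K).transform(K)
    #     >>> Xc = X - X.mean(axis=0)
    #     >>> bool(np.allclose(Kc, Xc.dot(Xc.T)))
    #     True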
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
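# Illustrative sketch of _transform_selected above (hypothetical callable): the
# transform is applied to column 0 only, and the untouched columns are stacked to the
# right of the transformed block.
#
#     >>> import numpy as np
#     >>> _transform_selected(np.array([[1., 10.], [2., 20.]]), lambda A: A * 2,
#     ...                     selected=[0])
#     array([[  2.,  10.],
#            [  4.,  20.]])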
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that were known during fit,
        # i.e. those less than n_values_, using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
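    # Hedged sketch (reuses the fit data from the class docstring above): with
    # handle_unknown='ignore', a category value unseen during fit simply yields an
    # all-zero block for that feature instead of raising an error.
    #
    #     >>> enc = OneHotEncoder(handle_unknown='ignore')
    #     >>> _ = enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
    #     >>> enc.transform([[1, 3, 2]]).toarray()
    #     array([[ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  1.,  0.]])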
| bsd-3-clause |
alan-unravel/bokeh | bokeh/charts/builder/donut_builder.py | 31 | 8206 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Donut class, which lets you build your Donut charts by just passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division
from math import pi
import pandas as pd
from ..utils import cycle_colors, polar_to_cartesian
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import AnnularWedge, Text, Wedge
from ...properties import Any, Bool, Either, List
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Donut(values, cat=None, width=800, height=800, xgrid=False, ygrid=False, **kws):
""" Creates a Donut chart using :class:`DonutBuilder <bokeh.charts.builder.donut_builder.DonutBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
cat (list or bool, optional): list of string representing the categories.
Defaults to None.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Donut, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = [[2., 5., 3.], [4., 1., 4.], [6., 4., 3.]]
donut = Donut(xyvalues, ['cpu1', 'cpu2', 'cpu3'])
output_file('donut.html')
show(donut)
"""
return create_and_build(
DonutBuilder, values, cat=cat, width=width, height=height,
xgrid=xgrid, ygrid=ygrid, **kws
)
class DonutBuilder(Builder):
"""This is the Donut class and it is in charge of plotting
    Donut charts in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the donut slices and angles.
    Finally, it adds the needed glyphs (Wedges and AnnularWedges), taking
    the references from the source.
"""
cat = Either(Bool, List(Any), help="""
List of string representing the categories. (Defaults to None.)
""")
def _process_data(self):
"""Take the chart data from self._values.
It calculates the chart properties accordingly (start/end angles).
Then build a dict containing references to all the calculated
points to be used by the Wedge glyph inside the ``_yield_renderers`` method.
"""
dd = dict(zip(self._values.keys(), self._values.values()))
self._df = df = pd.DataFrame(dd)
self._groups = df.index = self.cat
df.columns = self._values.keys()
# Get the sum per category
aggregated = df.T.sum()
# Get the total (sum of all categories)
self._total_units = total = aggregated.sum()
radians = lambda x: 2*pi*(x/total)
angles = aggregated.map(radians).cumsum()
end_angles = angles.tolist()
start_angles = [0] + end_angles[:-1]
colors = cycle_colors(self.cat, self.palette)
self.set_and_get("", "colors", colors)
self.set_and_get("", "end", end_angles)
self.set_and_get("", "start", start_angles)
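        # Worked illustration of the angle arithmetic above (made-up category sums):
        # with aggregated = [2, 3, 5] and total = 10, the cumulative fractions are
        # [0.2, 0.5, 1.0], so end_angles = [0.4*pi, pi, 2*pi] and
        # start_angles = [0, 0.4*pi, pi].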
def _set_sources(self):
"""Push the Donut data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = Range1d(start=-2, end=2)
self.y_range = Range1d(start=-2, end=2)
def draw_central_wedge(self):
"""Draw the central part of the donut wedge from donut.source and
its calculated start and end angles.
"""
glyph = Wedge(
x=0, y=0, radius=1, start_angle="start", end_angle="end",
line_color="white", line_width=2, fill_color="colors"
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
def draw_central_descriptions(self):
"""Draw the descriptions to be placed on the central part of the
donut wedge
"""
text = ["%s" % cat for cat in self.cat]
x, y = polar_to_cartesian(0.7, self._data["start"], self._data["end"])
text_source = ColumnDataSource(dict(text=text, x=x, y=y))
glyph = Text(
x="x", y="y", text="text",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def draw_external_ring(self, colors=None):
"""Draw the external part of the donut wedge from donut.source
and its related descriptions
"""
if colors is None:
colors = cycle_colors(self.cat, self.palette)
first = True
for i, (cat, start_angle, end_angle) in enumerate(zip(
self.cat, self._data['start'], self._data['end'])):
details = self._df.ix[i]
radians = lambda x: 2*pi*(x/self._total_units)
angles = details.map(radians).cumsum() + start_angle
end = angles.tolist() + [end_angle]
start = [start_angle] + end[:-1]
base_color = colors[i]
#fill = [ base_color.lighten(i*0.05) for i in range(len(details) + 1) ]
fill = [base_color for i in range(len(details) + 1)]
text = [rowlabel for rowlabel in details.index]
x, y = polar_to_cartesian(1.25, start, end)
source = ColumnDataSource(dict(start=start, end=end, fill=fill))
glyph = AnnularWedge(
x=0, y=0, inner_radius=1, outer_radius=1.5,
start_angle="start", end_angle="end",
line_color="white", line_width=2,
fill_color="fill"
)
yield GlyphRenderer(data_source=source, glyph=glyph)
text_angle = [(start[i]+end[i])/2 for i in range(len(start))]
text_angle = [angle + pi if pi/2 < angle < 3*pi/2 else angle
for angle in text_angle]
if first and text:
text.insert(0, '')
offset = pi / 48
text_angle.insert(0, text_angle[0] - offset)
start.insert(0, start[0] - offset)
end.insert(0, end[0] - offset)
x, y = polar_to_cartesian(1.25, start, end)
first = False
data = dict(text=text, x=x, y=y, angle=text_angle)
text_source = ColumnDataSource(data)
glyph = Text(
x="x", y="y", text="text", angle="angle",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def _yield_renderers(self):
"""Use the AnnularWedge and Wedge glyphs to display the wedges.
Takes reference points from data loaded at the ColumnDataSurce.
"""
# build the central round area of the donut
renderers = []
renderers += self.draw_central_wedge()
# write central descriptions
renderers += self.draw_central_descriptions()
# build external donut ring
renderers += self.draw_external_ring()
return renderers
| bsd-3-clause |
gnavvy/JellyFish | app.py | 1 | 2566 | __author__ = 'ywang'
from gevent import monkey
monkey.patch_all()
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
# from flask.ext import assets
app = Flask(__name__)
socketio = SocketIO(app)
# import os
# env = assets.Environment(app)
# env.load_path = [
# os.path.join(os.path.dirname(__file__), 'static'),
# os.path.join(os.path.dirname(__file__), 'bower_components')
# ]
# env.register('js_all', assets.Bundle(
# 'jquery/dist/jquery.min.js',
# 'jquery-ui/jquery-ui.min.js',
# 'underscore/underscore-min.js',
# 'react/react.min.js',
# 'socket.io-client/dist/socket.io.min.js',
# 'd3/d3.min.js',
# 'js/const.js',
# output='js_all.js'
# ))
import numpy as np
np.set_printoptions(precision=4)
np.set_printoptions(threshold=1000)
np.set_printoptions(linewidth=1000)
np.set_printoptions(suppress=True)
import pandas as pd
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 50)
import json
from random import random
df = pd.read_csv('data/wine.csv')
@app.route('/')
def index():
return render_template('index.html')
@socketio.on('connect')
def test_connect():
emit('handshake', {'data': 'Connected', 'count': 0})
@socketio.on('scatter-plot:mounted')
def get_scatter_plot_data(req):
method = req['method']
print(method)
emit(method+':data', {
'x': df[method+'.x'].to_json(orient='values'),
'y': df[method+'.y'].to_json(orient='values'),
'val': df[method+'.jaccard'].to_json(orient='values'),
'cls': df['Class'].to_json(orient='values')
})
@socketio.on('widget:mounted')
def get_widget_data(req):
method = req['method']
print(method)
if method == 'default-method':
pass
emit(method+':data', {
'x': df[method+'.x'].to_json(orient='values'),
'y': df[method+'.y'].to_json(orient='values'),
'val': df[method+'.jaccard'].to_json(orient='values'),
'cls': df['Class'].to_json(orient='values')
})
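    # Illustrative round trip (hypothetical method name): a client emitting
    # 'widget:mounted' with {'method': 'tsne'} receives a 'tsne:data' event whose
    # payload carries the 'tsne.x', 'tsne.y', 'tsne.jaccard' and 'Class' columns of
    # the CSV as JSON strings.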
@socketio.on('compass:mounted')
def get_compass_data():
numAxes = df.shape[1]
# uniformly distributed
axes = [{0.0: 0, 1.0: 2.0 * i / numAxes} for i in range(numAxes)]
data = [{random(): 2.0 * random()} for i in range(10)]
emit('compass:data', {
'axes': json.dumps(axes),
'data': json.dumps(data)
})
@socketio.on('matrix:mounted')
def get_matrix_data():
data = df.iloc[:, :-1].corr()
emit('matrix:data', {'data': data.to_json()})
if __name__ == '__main__':
app.debug = True
socketio.run(app)
| mit |
shusenl/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances faithfully reflect the distances in the original
high-dimensional space; unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem fairly closely
matches that of representing a flat map of the Earth, as with a
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_.
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
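# Quick sanity check of the spherical parametrisation above (illustrative): every
# generated point lies on the unit sphere, so x**2 + y**2 + z**2 should be ~1.
#
#     >>> bool(np.allclose(x ** 2 + y ** 2 + z ** 2, 1.0))
#     True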
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_samples, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
aabadie/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 82 | 4768 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_splits=3):
cv = KFold(n_splits=n_splits)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv.split(X_train, y_train):
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_splits
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
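# Worked illustration (made-up numbers): if clf.oob_improvement_ were [0.5, 0.3, -0.1],
# the negative cumulative sum would be [-0.5, -0.8, -0.7], i.e. the estimated change in
# loss relative to the start of boosting, which is what gets compared with the test and
# CV curves below.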
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
jbrundle/earthquake-forecasts | plot_Forecast_EPS_Region_Circle.py | 1 | 1373 | #!/opt/local/bin python
##############################################################################
# Code plots the EPS forecast for a circular region
# Usage: python plot_Forecast_EPS_Region_Circle.py NELat NELng SWLat SWLng MagLo Location
#
# Where: Latitude in degrees
# Longitude in degrees
# NE and SW corners of the map rectangle must be specified
# MagLo is the lower limit for the event magnitudes (typically 5.5)
# Location is a text label naming the forecast region
##############################################################################
#import sys
#sys.path.reverse()
import sys
import matplotlib
import numpy as np
from mpl_toolkits.basemap import Basemap
from array import array
import matplotlib.pyplot as plt
import urllib
import datetime
import dateutil.parser
import EQMethods
##############################################################################
def main(argv=None):
NELat = float(sys.argv[1])
NELng = float(sys.argv[2])
SWLat = float(sys.argv[3])
SWLng = float(sys.argv[4])
MagLo = float(sys.argv[5])
Location = str(sys.argv[6])
# EQMethods.get_catalog(NELat, NELng, SWLat, SWLng, MagLo)
EQMethods.forecast_eps_region_circle(NELat, NELng, SWLat, SWLng, MagLo, Location)
#
if __name__ == "__main__":
sys.exit(main())
| mit |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/tree/tree.py | 9 | 29885 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Licence: BSD 3 clause
from __future__ import division
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples. Use ``dtype=np.float32`` for maximum
efficiency.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
# Convert data
if check_input:
X = check_array(X, dtype=DTYPE)
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
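        # Illustrative resolution of the setting above (hypothetical n_features_ = 9,
        # classification): "auto" and "sqrt" give 3, "log2" gives 3, a float of 0.5
        # gives int(0.5 * 9) = 4, and None gives 9.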
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
tree_ : Tree object
The underlying Tree object.
max_features_ : int,
        The inferred value of max_features.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
def predict_proba(self, X):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in xrange(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
tree_ : Tree object
The underlying Tree object.
max_features_ : int,
        The inferred value of max_features.
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
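    Examples
    --------
    A minimal usage sketch (illustrative only; it assumes the
    ``ExtraTreesClassifier`` ensemble listed under "See also", since
    single extra-trees are not recommended on their own):
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.ensemble import ExtraTreesClassifier
    >>> iris = load_iris()
    >>> clf = ExtraTreesClassifier(n_estimators=10, random_state=0)
    >>> clf.fit(iris.data, iris.target)  # doctest: +SKIP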
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
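    Examples
    --------
    A minimal usage sketch (illustrative only; it assumes the
    ``ExtraTreesRegressor`` ensemble listed under "See also", since
    single extra-trees are not recommended on their own):
    >>> import numpy as np
    >>> from sklearn.ensemble import ExtraTreesRegressor
    >>> X = np.arange(20.0).reshape(10, 2)
    >>> y = X[:, 0] + X[:, 1]
    >>> reg = ExtraTreesRegressor(n_estimators=10, random_state=0)
    >>> reg.fit(X, y).predict([[2.0, 3.0]])  # doctest: +SKIP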
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
robcarver17/pysystemtrade | syscore/dateutils.py | 1 | 20758 | """
Various routines to do with dates
"""
from enum import Enum
import datetime
import time
import calendar
import numpy as np
import pandas as pd
from syscore.genutils import sign
from syscore.objects import missing_data
"""
First some constants
"""
CALENDAR_DAYS_IN_YEAR = 365.25
BUSINESS_DAYS_IN_YEAR = 256.0
ROOT_BDAYS_INYEAR = BUSINESS_DAYS_IN_YEAR ** 0.5
WEEKS_IN_YEAR = CALENDAR_DAYS_IN_YEAR / 7.0
ROOT_WEEKS_IN_YEAR = WEEKS_IN_YEAR ** 0.5
MONTHS_IN_YEAR = 12.0
ROOT_MONTHS_IN_YEAR = MONTHS_IN_YEAR ** 0.5
APPROX_DAYS_IN_MONTH = CALENDAR_DAYS_IN_YEAR / MONTHS_IN_YEAR
ARBITRARY_START = datetime.datetime(1900, 1, 1)
HOURS_PER_DAY = 24
MINUTES_PER_HOUR = 60
SECONDS_PER_HOUR = MINUTES_PER_HOUR * 60  # 3600; used for SECONDS_PER_DAY and hours-left calculations
SECONDS_PER_DAY = HOURS_PER_DAY * SECONDS_PER_HOUR
SECONDS_IN_YEAR = CALENDAR_DAYS_IN_YEAR * SECONDS_PER_DAY
MINUTES_PER_YEAR = CALENDAR_DAYS_IN_YEAR * HOURS_PER_DAY * MINUTES_PER_HOUR
UNIXTIME_CONVERTER = 1e9
UNIXTIME_IN_YEAR = UNIXTIME_CONVERTER * SECONDS_IN_YEAR
MONTH_LIST = ["F", "G", "H", "J", "K", "M", "N", "Q", "U", "V", "X", "Z"]
Frequency = Enum('Frequency', 'Unknown Year Month Week BDay Day Hour Minutes_15 Minutes_5 Minute Seconds_10 Second')
DAILY_PRICE_FREQ = Frequency.Day
BUSINESS_DAY_FREQ = Frequency.BDay
def from_config_frequency_pandas_resample(freq: Frequency) -> str:
LOOKUP_TABLE = {Frequency.BDay: 'B',
Frequency.Week: 'W',
Frequency.Month: 'M',
Frequency.Hour: 'H',
Frequency.Year: 'A',
Frequency.Day: 'D',
Frequency.Minutes_15: '15T',
Frequency.Minutes_5: '5T',
Frequency.Seconds_10: '10S',
Frequency.Second: 'S'}
resample_string = LOOKUP_TABLE.get(freq, missing_data)
return resample_string
def from_frequency_to_times_per_year(freq: Frequency) -> float:
LOOKUP_TABLE = {Frequency.BDay: BUSINESS_DAYS_IN_YEAR,
Frequency.Week: WEEKS_IN_YEAR,
Frequency.Month: MONTHS_IN_YEAR,
Frequency.Hour: HOURS_PER_DAY * BUSINESS_DAYS_IN_YEAR,
Frequency.Year: 1,
Frequency.Day: CALENDAR_DAYS_IN_YEAR,
Frequency.Minutes_15: (MINUTES_PER_YEAR/15),
Frequency.Minutes_5: (MINUTES_PER_YEAR/5),
Frequency.Seconds_10: SECONDS_IN_YEAR/10,
Frequency.Second: SECONDS_IN_YEAR}
times_per_year = LOOKUP_TABLE.get(freq, missing_data)
return float(times_per_year)
def from_config_frequency_to_frequency(freq_as_str:str)-> Frequency:
LOOKUP_TABLE = {'Y': Frequency.Year,
'm': Frequency.Month,
'W': Frequency.Week,
'D':Frequency.Day,
'H':Frequency.Hour,
'15M': Frequency.Minutes_15,
'5M': Frequency.Minutes_5,
'M': Frequency.Minute,
'10S': Frequency.Seconds_10,
'S': Frequency.Second}
frequency = LOOKUP_TABLE.get(freq_as_str, missing_data)
return frequency
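# Usage sketch for the frequency helpers above (illustrative):
#     from_config_frequency_to_frequency("D")               # -> Frequency.Day
#     from_config_frequency_pandas_resample(Frequency.Day)  # -> 'D'
#     from_frequency_to_times_per_year(Frequency.BDay)      # -> 256.0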
def month_from_contract_letter(contract_letter: str) -> int:
"""
Returns month number (1 is January) from contract letter
>>> month_from_contract_letter("F")
1
>>> month_from_contract_letter("Z")
12
>>> month_from_contract_letter("A")
Exception: Contract letter A is not a valid future month (must be one of ['F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z'])
"""
try:
month_number = MONTH_LIST.index(contract_letter)
except ValueError:
raise Exception("Contract letter %s is not a valid future month (must be one of %s)" %
(contract_letter, str(MONTH_LIST)))
return month_number + 1
def contract_month_from_number(month_number: int) -> str:
"""
Returns standard month letters used in futures land
>>> contract_month_from_number(1)
'F'
>>> contract_month_from_number(12)
'Z'
>>> contract_month_from_number(0)
AssertionError
>>> contract_month_from_number(13)
AssertionError
:param month_number: int
:return: str
"""
assert month_number>0 and month_number<13
return MONTH_LIST[month_number - 1]
def get_datetime_from_datestring(datestring: str):
"""
Translates a date which could be "20150305" or "201505" into a datetime
:param datestring: Date to be processed
:type days: str
:returns: datetime.datetime
>>> get_datetime_from_datestring('201503')
datetime.datetime(2015, 3, 1, 0, 0)
>>> get_datetime_from_datestring('20150300')
datetime.datetime(2015, 3, 1, 0, 0)
>>> get_datetime_from_datestring('20150305')
datetime.datetime(2015, 3, 5, 0, 0)
>>> get_datetime_from_datestring('2015031')
Exception: 2015031 needs to be a string with 6 or 8 digits
>>> get_datetime_from_datestring('2015013')
Exception: 2015013 needs to be a string with 6 or 8 digits
"""
# do string expiry calc
if len(datestring) == 8:
if datestring[6:8] == "00":
            return datetime.datetime.strptime(datestring[:6], "%Y%m")
else:
return datetime.datetime.strptime(datestring, "%Y%m%d")
if len(datestring) == 6:
return datetime.datetime.strptime(datestring, "%Y%m")
else:
raise Exception(
"%s needs to be a string with 6 or 8 digits" % datestring
)
def _DEPRECATE_fraction_of_year_between_price_and_carry_expiries(carry_row: pd.Series,
floor_date_diff: float = 1/CALENDAR_DAYS_IN_YEAR) -> float:
"""
Given a pandas row containing CARRY_CONTRACT and PRICE_CONTRACT, both of
which represent dates
Return the difference between the dates as a fraction
Positive means PRICE BEFORE CARRY, negative means CARRY BEFORE PRICE
:param carry_row: object with attributes CARRY_CONTRACT and PRICE_CONTRACT
:type carry_row: pandas row, or something that quacks like it
    :param floor_date_diff: If the absolute fraction is smaller than this, floor it here (*default* one calendar day, i.e. 1/365.25)
    :type floor_date_diff: float
:returns: float
>>> import pandas as pd
>>> carry_df = pd.DataFrame(dict(PRICE_CONTRACT =["20200601", "20200601", "20200601"],\
CARRY_CONTRACT = ["20200303", "20200905", "20200603"]))
    >>> _DEPRECATE_fraction_of_year_between_price_and_carry_expiries(carry_df.iloc[0])
    -0.25
    >>> _DEPRECATE_fraction_of_year_between_price_and_carry_expiries(carry_df.iloc[1])
    0.25
    >>> _DEPRECATE_fraction_of_year_between_price_and_carry_expiries(carry_df.iloc[1], floor_date_diff=0.5)
    0.5
"""
fraction_of_year_between_expiries = _DEPRECATE_get_fraction_of_year_between_expiries(carry_row)
if np.isnan(fraction_of_year_between_expiries):
return np.nan
fraction_of_year_between_expiries = _DEPRECATE_apply_floor_to_date_differential(fraction_of_year_between_expiries,
floor_date_diff=floor_date_diff)
return fraction_of_year_between_expiries
def _DEPRECATE_get_fraction_of_year_between_expiries(carry_row) -> float:
if carry_row.PRICE_CONTRACT == "" or carry_row.CARRY_CONTRACT == "":
return np.nan
carry_expiry = _DEPRECATE_get_approx_year_as_number_from_date_as_string(carry_row.CARRY_CONTRACT)
price_expiry = _DEPRECATE_get_approx_year_as_number_from_date_as_string(carry_row.PRICE_CONTRACT)
fraction_of_year_between_expiries = carry_expiry - price_expiry
return fraction_of_year_between_expiries
def _DEPRECATE_get_approx_year_as_number_from_date_as_string(date_string: str):
## Faster than using get_datetime_from_datestring, and approximate
year = float(date_string[:4])
    month = float(date_string[4:6])
month_as_year_frac = month / 12.0
year_from_zero = year + month_as_year_frac
return year_from_zero
def _DEPRECATE_apply_floor_to_date_differential(fraction_of_year_between_expiries: float,
floor_date_diff: float):
if abs(fraction_of_year_between_expiries) < floor_date_diff:
fraction_of_year_between_expiries = \
sign(fraction_of_year_between_expiries) * floor_date_diff
return fraction_of_year_between_expiries
class fit_dates_object(object):
def __init__(
self,
fit_start,
fit_end,
period_start,
period_end,
no_data=False):
setattr(self, "fit_start", fit_start)
setattr(self, "fit_end", fit_end)
setattr(self, "period_start", period_start)
setattr(self, "period_end", period_end)
setattr(self, "no_data", no_data)
def __repr__(self):
if self.no_data:
return "Fit without data, use from %s to %s" % (
self.period_start,
self.period_end,
)
else:
return "Fit from %s to %s, use in %s to %s" % (
self.fit_start,
self.fit_end,
self.period_start,
self.period_end,
)
def generate_fitting_dates(data: pd.DataFrame, date_method: str, rollyears: int=20):
"""
    generate a list of fit periods (fit_dates_object), one element for each year in the data
    each element contains fit_start, fit_end, period_start, period_end datetime objects
the last period will be a 'stub' if we haven't got an exact number of years
date_method can be one of 'in_sample', 'expanding', 'rolling'
if 'rolling' then use rollyears variable
"""
print("*** USE METHOD IN SYSQUANT INSTEAD**")
if date_method not in ["in_sample", "rolling", "expanding"]:
raise Exception(
"don't recognise date_method %s should be one of in_sample, expanding, rolling" %
date_method)
if isinstance(data, list):
start_date = min([dataitem.index[0] for dataitem in data])
end_date = max([dataitem.index[-1] for dataitem in data])
else:
start_date = data.index[0]
end_date = data.index[-1]
# now generate the dates we use to fit
if date_method == "in_sample":
# single period
return [fit_dates_object(start_date, end_date, start_date, end_date)]
# generate list of dates, one year apart, including the final date
yearstarts = list(
pd.date_range(
start_date,
end_date,
freq="12M")) + [end_date]
# loop through each period
periods = []
for tidx in range(len(yearstarts))[1:-1]:
# these are the dates we test in
period_start = yearstarts[tidx]
period_end = yearstarts[tidx + 1]
# now generate the dates we use to fit
if date_method == "expanding":
fit_start = start_date
elif date_method == "rolling":
yearidx_to_use = max(0, tidx - rollyears)
fit_start = yearstarts[yearidx_to_use]
else:
raise Exception(
"don't recognise date_method %s should be one of in_sample, expanding, rolling" %
date_method)
if date_method in ["rolling", "expanding"]:
fit_end = period_start
else:
raise Exception("don't recognise date_method %s " % date_method)
periods.append(
fit_dates_object(
fit_start,
fit_end,
period_start,
period_end))
if date_method in ["rolling", "expanding"]:
# add on a dummy date for the first year, when we have no data
periods = [
fit_dates_object(
start_date, start_date, start_date, yearstarts[1], no_data=True
)
] + periods
return periods
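# Usage sketch for generate_fitting_dates (illustrative; `instrument_prices` is a
# hypothetical pandas DataFrame or Series with a datetime index):
#     fit_periods = generate_fitting_dates(instrument_prices, date_method="expanding")
#     for fit_period in fit_periods:
#         print(fit_period)  # e.g. "Fit from <start> to <end>, use in <start> to <end>"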
def time_matches(
index_entry, closing_time=pd.DateOffset(hours=12, minutes=0, seconds=0)
):
if (
index_entry.hour == closing_time.hours
and index_entry.minute == closing_time.minutes
and index_entry.second == closing_time.seconds
):
return True
else:
return False
"""
Convert date into a decimal, and back again
"""
LONG_DATE_FORMAT = "%Y%m%d%H%M%S.%f"
LONG_TIME_FORMAT = "%H%M%S.%f"
LONG_JUST_DATE_FORMAT = "%Y%m%d"
CONVERSION_FACTOR = 10000
def datetime_to_long(date_to_convert: datetime.datetime)-> int:
as_str = date_to_convert.strftime(LONG_DATE_FORMAT)
as_float = float(as_str)
return int(as_float * CONVERSION_FACTOR)
def long_to_datetime(int_to_convert:int) -> datetime.datetime:
as_float = float(int_to_convert) / CONVERSION_FACTOR
str_to_convert = "%.6f" % as_float
# have to do this because of leap seconds
time_string, dot, microseconds = str_to_convert.partition(".")
utc_time_tuple = time.strptime(str_to_convert, LONG_DATE_FORMAT)
as_datetime = datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=calendar.timegm(utc_time_tuple)
)
as_datetime = as_datetime.replace(
microsecond=datetime.datetime.strptime(microseconds, "%f").microsecond
)
return as_datetime
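# Round-trip sketch (illustrative): the long encoding is the %Y%m%d%H%M%S.%f
# string scaled by CONVERSION_FACTOR, so it reverses to roughly microsecond resolution.
#     as_long = datetime_to_long(datetime.datetime(2020, 5, 30, 14, 4, 11))
#     long_to_datetime(as_long)   # ~ datetime.datetime(2020, 5, 30, 14, 4, 11)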
NOTIONAL_CLOSING_TIME = dict(hours=23, minutes=0, seconds=0)
NOTIONAL_CLOSING_TIME_AS_PD_OFFSET = pd.DateOffset(hours = NOTIONAL_CLOSING_TIME['hours'],
minutes = NOTIONAL_CLOSING_TIME['minutes'],
seconds = NOTIONAL_CLOSING_TIME['seconds'])
def adjust_timestamp_to_include_notional_close_and_time_offset(
timestamp: datetime.datetime,
actual_close: pd.DateOffset = NOTIONAL_CLOSING_TIME_AS_PD_OFFSET,
original_close: pd.DateOffset = pd.DateOffset(hours=23, minutes=0, seconds=0),
time_offset: pd.DateOffset = pd.DateOffset(hours=0),
) -> datetime.datetime:
if timestamp.hour == 0 and timestamp.minute == 0 and timestamp.second == 0:
new_datetime = timestamp.date() + actual_close
elif time_matches(timestamp, original_close):
new_datetime = timestamp.date() + actual_close
else:
new_datetime = timestamp + time_offset
return new_datetime
def strip_timezone_fromdatetime(timestamp_with_tz_info) -> datetime.datetime:
ts = timestamp_with_tz_info.timestamp()
new_timestamp = datetime.datetime.fromtimestamp(ts)
return new_timestamp
def get_datetime_input(prompt:str, allow_default:bool=True, allow_no_arg:bool=False):
invalid_input = True
input_str = (
prompt +
": Enter date and time in format %Y%-%m-%d eg '2020-05-30' OR '%Y-%m-%d %H:%M:%S' eg '2020-05-30 14:04:11'")
if allow_default:
input_str = input_str + " <RETURN for now>"
if allow_no_arg:
input_str = input_str + " <SPACE for no date>' "
while invalid_input:
ans = input(input_str)
if ans == "" and allow_default:
return datetime.datetime.now()
if ans == " " and allow_no_arg:
return None
try:
if len(ans) == 10:
return_datetime = datetime.datetime.strptime(ans, "%Y-%m-%d")
elif len(ans) == 19:
return_datetime = datetime.datetime.strptime(ans, "%Y-%m-%d %H:%M:%S")
else:
# problems formatting will also raise value error
raise ValueError
return return_datetime
except ValueError:
print("%s is not a valid datetime string" % ans)
continue
class tradingStartAndEndDateTimes(object):
def __init__(self, hour_tuple):
self._start_time = hour_tuple[0]
self._end_time = hour_tuple[1]
@property
def start_time(self):
return self._start_time
@property
def end_time(self):
return self._end_time
def okay_to_trade_now(self) -> bool:
datetime_now = datetime.datetime.now()
if datetime_now >= self.start_time and datetime_now <= self.end_time:
return True
else:
return False
def hours_left_before_market_close(self)->float:
if not self.okay_to_trade_now():
# market closed
return 0
datetime_now = datetime.datetime.now()
time_left = self.end_time - datetime_now
seconds_left = time_left.total_seconds()
hours_left = float(seconds_left) / SECONDS_PER_HOUR
return hours_left
def less_than_N_hours_left(self, N_hours: float = 1.0) -> bool:
hours_left = self.hours_left_before_market_close()
if hours_left<N_hours:
return True
else:
return False
class manyTradingStartAndEndDateTimes(list):
def __init__(self, list_of_trading_hours):
"""
:param list_of_trading_hours: list of tuples, both datetime, first is start and second is end
"""
list_of_start_and_end_objects = []
for hour_tuple in list_of_trading_hours:
this_period = tradingStartAndEndDateTimes(hour_tuple)
list_of_start_and_end_objects.append(this_period)
super().__init__(list_of_start_and_end_objects)
def okay_to_trade_now(self):
for check_period in self:
if check_period.okay_to_trade_now():
# okay to trade if it's okay to trade on some date
return True
return False
def less_than_N_hours_left(self, N_hours: float = 1.0):
for check_period in self:
if check_period.okay_to_trade_now():
# market is open, but for how long?
if check_period.less_than_N_hours_left(N_hours=N_hours):
return True
else:
return False
else:
# move on to next period
continue
# market closed, we treat that as 'less than one hour left'
return True
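# Usage sketch (illustrative; the trading window below is made up):
#     window = manyTradingStartAndEndDateTimes(
#         [(datetime.datetime(2021, 3, 1, 8, 0), datetime.datetime(2021, 3, 1, 22, 0))]
#     )
#     window.okay_to_trade_now()           # True only while now() falls inside a window
#     window.less_than_N_hours_left(1.0)   # True in the final hour, or if already closed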
SHORT_DATE_PATTERN = "%m/%d %H:%M:%S"
MISSING_STRING_PATTERN = " ??? "
def last_run_or_heartbeat_from_date_or_none(last_run_or_heartbeat: datetime.datetime):
if last_run_or_heartbeat is missing_data or last_run_or_heartbeat is None:
last_run_or_heartbeat = MISSING_STRING_PATTERN
else:
last_run_or_heartbeat = last_run_or_heartbeat.strftime(
SHORT_DATE_PATTERN)
return last_run_or_heartbeat
date_formatting = "%Y%m%d_%H%M%S"
def create_datetime_string(datetime_to_use):
datetime_marker = datetime_to_use.strftime(date_formatting)
return datetime_marker
def from_marker_to_datetime(datetime_marker):
return datetime.datetime.strptime(datetime_marker, date_formatting)
def two_weeks_ago():
return n_days_ago(14)
def n_days_ago(n_days: int):
today = datetime.datetime.now()
d = datetime.timedelta(days = n_days)
return today - d
def adjust_trading_hours_conservatively(trading_hours: list,
conservative_times: tuple) -> list:
new_trading_hours = [adjust_single_day_conservatively(single_days_hours,
conservative_times)
for single_days_hours in trading_hours]
return new_trading_hours
def adjust_single_day_conservatively(single_days_hours: tuple,
conservative_times: tuple) -> tuple:
adjusted_start_datetime = adjust_start_time_conservatively(single_days_hours[0],
conservative_times[0])
adjusted_end_datetime = adjust_end_time_conservatively(single_days_hours[1],
conservative_times[1])
return (adjusted_start_datetime, adjusted_end_datetime)
def adjust_start_time_conservatively(start_datetime: datetime.datetime,
start_conservative: datetime.time) -> datetime.datetime:
start_conservative_datetime = adjust_date_conservatively(start_datetime,
start_conservative)
return max(start_datetime, start_conservative_datetime)
def adjust_end_time_conservatively(end_datetime: datetime.datetime,
                                   end_conservative: datetime.time) -> datetime.datetime:
    end_conservative_datetime = adjust_date_conservatively(end_datetime,
                                                           end_conservative)
    return min(end_datetime, end_conservative_datetime)
def adjust_date_conservatively(datetime_to_be_adjusted: datetime.datetime,
conservative_time: datetime.time) -> datetime.datetime:
return datetime.datetime.combine(datetime_to_be_adjusted.date(), conservative_time)
| gpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/tests/test_bbox_tight.py | 4 | 3861 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from distutils.version import LooseVersion
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import numpy as np
from matplotlib import rcParams
from matplotlib.testing.decorators import image_comparison
from matplotlib.testing.noseclasses import KnownFailureTest
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.patches as mpatches
from matplotlib.ticker import FuncFormatter
@image_comparison(baseline_images=['bbox_inches_tight'], remove_text=True,
savefig_kwarg=dict(bbox_inches='tight'), tol=15)
def test_bbox_inches_tight():
#: Test that a figure saved using bbox_inches='tight' is clipped correctly
data = [[ 66386, 174296, 75131, 577908, 32015],
[ 58230, 381139, 78045, 99308, 160454],
[ 89135, 80552, 152558, 497981, 603535],
[ 78415, 81858, 150656, 193263, 69638],
[139361, 331509, 343164, 781380, 52269]]
colLabels = rowLabels = [''] * 5
rows = len(data)
ind = np.arange(len(colLabels)) + 0.3 # the x locations for the groups
cellText = []
width = 0.4 # the width of the bars
yoff = np.array([0.0] * len(colLabels))
# the bottom values for stacked bar chart
fig, ax = plt.subplots(1, 1)
for row in xrange(rows):
plt.bar(ind, data[row], width, bottom=yoff)
yoff = yoff + data[row]
cellText.append([''])
plt.xticks([])
plt.legend([''] * 5, loc=(1.2, 0.2))
# Add a table at the bottom of the axes
cellText.reverse()
the_table = plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, loc='bottom')
@image_comparison(baseline_images=['bbox_inches_tight_suptile_legend'],
remove_text=False, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_suptile_legend():
plt.plot(list(xrange(10)), label='a straight line')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, )
plt.title('Axis title')
plt.suptitle('Figure title')
# put an extra long y tick on to see that the bbox is accounted for
def y_formatter(y, pos):
if int(y) == 4:
return 'The number 4'
else:
return str(y)
plt.gca().yaxis.set_major_formatter(FuncFormatter(y_formatter))
plt.xlabel('X axis')
@image_comparison(baseline_images=['bbox_inches_tight_clipping'],
remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_clipping():
# tests bbox clipping on scatter points, and path clipping on a patch
# to generate an appropriately tight bbox
plt.scatter(list(xrange(10)), list(xrange(10)))
ax = plt.gca()
ax.set_xlim([0, 5])
ax.set_ylim([0, 5])
# make a massive rectangle and clip it with a path
patch = mpatches.Rectangle([-50, -50], 100, 100,
transform=ax.transData,
facecolor='blue', alpha=0.5)
path = mpath.Path.unit_regular_star(5).deepcopy()
path.vertices *= 0.25
patch.set_clip_path(path, transform=ax.transAxes)
plt.gcf().artists.append(patch)
@image_comparison(baseline_images=['bbox_inches_tight_raster'],
remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_raster():
"""Test rasterization with tight_layout"""
if LooseVersion(np.__version__) >= LooseVersion('1.11.0'):
raise KnownFailureTest("Fall out from a fixed numpy bug")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1.0, 2.0], rasterized=True)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |