repo_name | path | copies | size | content | license
---|---|---|---|---|---|
ratnania/pigasus | doc/manual/include/demo/test_neumann_quartcircle.py | 1 | 2730 | #! /usr/bin/python
# ...
try:
from matplotlib import pyplot as plt
PLOT=True
except ImportError:
PLOT=False
# ...
import numpy as np
from pigasus.gallery.poisson import *
import sys
import inspect
filename = inspect.getfile(inspect.currentframe()) # script filename (usually with path)
# ...
sin = np.sin ; cos = np.cos ; pi = np.pi ; exp = np.exp
# ...
#-----------------------------------
try:
nx = int(sys.argv[1])
except (IndexError, ValueError):
nx = 31
try:
ny = int(sys.argv[2])
except (IndexError, ValueError):
ny = 31
try:
px = int(sys.argv[3])
except (IndexError, ValueError):
px = 2
try:
py = int(sys.argv[4])
except (IndexError, ValueError):
py = 2
from igakit.cad_geometry import quart_circle as domain
geo = domain(n=[nx,ny],p=[px,py])
#-----------------------------------
# ...
# exact solution
# ...
R = 1.
r = 0.5
c = 1. # for neumann
#c = pi / (R**2-r**2) # for all dirichlet bc
u = lambda x,y : [ x * y * sin ( c * (R**2 - x**2 - y**2 )) ]
# ...
# ...
# rhs
# ...
f = lambda x,y : [4*c**2*x**3*y*sin(c*(R**2 - x**2 - y**2)) \
+ 4*c**2*x*y**3*sin(c*(R**2 - x**2 - y**2)) \
+ 12*c*x*y*cos(c*(R**2 - x**2 - y**2)) ]
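# (f above is -Laplacian(u) for the exact solution u defined earlier, i.e. the
# right-hand side of the Poisson problem being solved.)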
# ...
# ...
# values of gradu.n at the boundary
# ...
gradu = lambda x,y : [-2*c*x**2*y*cos(c*(R**2 - x**2 - y**2)) + y*sin(c*(R**2 - x**2 - y**2)) \
,-2*c*x*y**2*cos(c*(R**2 - x**2 - y**2)) + x*sin(c*(R**2 - x**2 - y**2)) ]
def func_g (x,y) :
du = gradu (x, y)
return [ du[0] , du[1] ]
# ...
# ...
# values of u at the boundary
# ...
bc_neumann={}
bc_neumann [0,0] = func_g
Dirichlet = [[1,2,3]]
#AllDirichlet = True
# ...
# ...
try:
bc_dirichlet
except NameError:
bc_dirichlet = None
else:
pass
try:
bc_neumann
except NameError:
bc_neumann = None
else:
pass
try:
AllDirichlet
except NameError:
AllDirichlet = None
else:
pass
try:
Dirichlet
except NameError:
Dirichlet = None
else:
pass
try:
Metric
except NameError:
Metric = None
else:
pass
# ...
# ...
PDE = poisson(geometry=geo, bc_dirichlet=bc_dirichlet, bc_neumann=bc_neumann,
AllDirichlet=AllDirichlet, Dirichlet=Dirichlet,metric=Metric)
# ...
# ...
PDE.assembly(f=f)
PDE.solve()
# ...
# ...
normU = PDE.norm(exact=u)
print "norm U = ", normU
# ...
# ...
if PLOT:
PDE.plot() ; plt.colorbar(); plt.title('$u_h$')
plt.savefig(filename.split('.py')[0]+'.png', format='png')
plt.clf()
# ...
PDE.free()
| mit |
devanshdalal/scikit-learn | examples/gaussian_process/plot_gpr_noisy_targets.py | 64 | 3706 | """
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
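# alpha is added to the diagonal of the kernel matrix during fitting; here it
# is derived from the known per-point noise estimate dy (the Tikhonov
# regularization noted in the module docstring).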
gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
lordkman/burnman | examples/example_geotherms.py | 4 | 4049 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_geotherms
-----------------
This example shows each of the geotherms currently possible with BurnMan.
These are:
1. Brown and Shankland, 1981 :cite:`Brown1981`
2. Anderson, 1982 :cite:`anderson1982earth`
3. Watson and Baxter, 2007 :cite:`Watson2007`
4. linear extrapolation
5. Read in from file from user
6. Adiabatic from potential temperature and choice of mineral
*Uses:*
* :func:`burnman.geotherm.brown_shankland`
* :func:`burnman.geotherm.anderson`
* input geotherm file *input_geotherm/example_geotherm.txt* (optional)
* :class:`burnman.composite.Composite` for adiabat
*Demonstrates:*
* the available geotherms
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
import burnman
from burnman import minerals
if __name__ == "__main__":
# we want to evaluate several geotherms at these values
pressures = np.arange(9.0e9, 128e9, 3e9)
seismic_model = burnman.seismic.PREM()
depths = seismic_model.depth(pressures)
# load two builtin geotherms and evaluate the temperatures at all pressures
temperature1 = burnman.geotherm.brown_shankland(depths)
temperature2 = burnman.geotherm.anderson(depths)
# a geotherm is actually just a function that returns a list of temperatures given pressures in Pa
# so we can just write our own function
my_geotherm_function = lambda p: [1500 + (2500 - 1500) * x / 128e9 for x in p]
temperature3 = my_geotherm_function(pressures)
# what about a geotherm defined from datapoints given in a file (our
# inline)?
table = [[1e9, 1600], [30e9, 1700], [130e9, 2700]]
# this could also be loaded from a file, just uncomment this
# table = burnman.tools.read_table("input_geotherm/example_geotherm.txt")
table_pressure = np.array(table)[:, 0]
table_temperature = np.array(table)[:, 1]
my_geotherm_interpolate = lambda p: [np.interp(x, table_pressure,
table_temperature) for x in p]
temperature4 = my_geotherm_interpolate(pressures)
# finally, we can also calculate a self consistent
# geotherm for an assemblage of minerals
# based on self compression of the composite rock.
# First we need to define an assemblage
amount_perovskite = 0.8
fe_pv = 0.05
fe_pc = 0.2
pv = minerals.SLB_2011.mg_fe_perovskite()
pc = minerals.SLB_2011.ferropericlase()
pv.set_composition([1. - fe_pv, fe_pv, 0.])
pc.set_composition([1. - fe_pc, fe_pc])
example_rock = burnman.Composite(
[pv, pc], [amount_perovskite, 1.0 - amount_perovskite])
# next, define an anchor temperature at which we are starting.
# Perhaps 1500 K for the upper mantle
T0 = 1500.
# then generate temperature values using the self consistent function.
# This takes more time than the above methods
temperature5 = burnman.geotherm.adiabatic(pressures, T0, example_rock)
# you can also look at burnman/geotherm.py to see how the geotherms are
# implemented
plt.plot(pressures / 1e9, temperature1, '-r', label="Brown, Shankland")
plt.plot(pressures / 1e9, temperature2, '-c', label="Anderson")
plt.plot(pressures / 1e9, temperature3, '-b', label="handwritten linear")
plt.plot(pressures / 1e9, temperature4,
'-k', label="handwritten from table")
plt.plot(pressures / 1e9, temperature5, '-m',
label="Adiabat with pv (80%) and fp (20%)")
plt.legend(loc='lower right')
plt.xlim([8.5, 130])
plt.xlabel('Pressure/GPa')
plt.ylabel('Temperature')
plt.savefig("output_figures/example_geotherm.png")
plt.show()
| gpl-2.0 |
francesco-mannella/dmp-esn | parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves_angles_start_none/results/plot.py | 18 | 1043 | #!/usr/bin/env python
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
pathname = os.path.dirname(sys.argv[0])
if pathname:
os.chdir(pathname)
n_dim = None
trains = []
for fname in glob.glob("tl*"):
t = np.loadtxt(fname)
trains.append(t)
tests = []
for fname in glob.glob("tt*"):
t = np.loadtxt(fname)
tests.append(t)
trial_results= []
for fname in glob.glob("rtl*"):
t = np.loadtxt(fname)
trial_results.append(t)
test_results= []
for fname in glob.glob("rtt*"):
t = np.loadtxt(fname)
test_results.append(t)
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
for d in trains:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="blue", lw=3, alpha=0.5)
for d in tests:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="red", lw=3, alpha=0.5)
for d in trial_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[0,0,.5], lw=2)
for d in test_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[.5,0,0], lw=2)
plt.show()
| gpl-2.0 |
flowersteam/SESM | SESM/pykinect.py | 2 | 3387 | import zmq
import numpy
import threading
from collections import namedtuple
Point2D = namedtuple('Point2D', ('x', 'y'))
Point3D = namedtuple('Point3D', ('x', 'y', 'z'))
Quaternion = namedtuple('Quaternion', ('x', 'y', 'z', 'w'))
torso_joints = ('hip_center', 'spine', 'shoulder_center', 'head')
left_arm_joints = ('shoulder_left', 'elbow_left', 'wrist_left', 'hand_left')
right_arm_joints = ('shoulder_right', 'elbow_right', 'wrist_right', 'hand_right')
left_leg_joints = ('hip_left', 'knee_left', 'ankle_left', 'foot_left')
right_leg_joints = ('hip_right', 'knee_right', 'ankle_right', 'foot_right')
skeleton_joints = torso_joints + left_arm_joints + right_arm_joints + left_leg_joints + right_leg_joints
class Skeleton(namedtuple('Skeleton', ('timestamp', 'user_id') + skeleton_joints)):
joints = skeleton_joints
@property
def to_np(self):
l = []
for j in self.joints:
p = getattr(self, j).position
l.append((p.x, p.y, p.z))
return numpy.array(l)
Joint = namedtuple('Joint', ('position', 'orientation', 'pixel_coordinate'))
class KinectSensor(object):
def __init__(self, addr, port):
self._lock = threading.Lock()
self._skeleton = None
context = zmq.Context()
self.socket = context.socket(zmq.REQ)
self.socket.connect('tcp://{}:{}'.format(addr, port))
t = threading.Thread(target=self.get_skeleton)
t.daemon = True
t.start()
@property
def tracked_skeleton(self):
with self._lock:
return self._skeleton
@tracked_skeleton.setter
def tracked_skeleton(self, skeleton):
with self._lock:
self._skeleton = skeleton
def get_skeleton(self):
while True:
self.socket.send('Hello')
md = self.socket.recv_json()
msg = self.socket.recv()
skeleton_array = numpy.frombuffer(buffer(msg), dtype=md['dtype'])
skeleton_array = skeleton_array.reshape(md['shape'])
joints = []
for i in range(len(skeleton_joints)):
x, y, z, w = skeleton_array[i][0:4]
position = Point3D(x / w, y / w, z / w)
pixel_coord = Point2D(*skeleton_array[i][4:6])
orientation = Quaternion(*skeleton_array[i][6:10])
joints.append(Joint(position, orientation, pixel_coord))
self.tracked_skeleton = Skeleton(md['timestamp'], md['user_index'], *joints)
def draw_position(skel, ax):
xy, zy = [], []
if not skel:
return
for j in skeleton_joints:
p = getattr(skel, j).position
xy.append((p.x, p.y))
zy.append((p.z, p.y))
ax.set_xlim(-2, 5)
ax.set_ylim(-1.5, 1.5)
ax.scatter(zip(*xy)[0], zip(*xy)[1], 30, 'b')
ax.scatter(zip(*zy)[0], zip(*zy)[1], 30, 'r')
if __name__ == '__main__':
import time
import matplotlib.pyplot as plt
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
kinect_sensor = KinectSensor('193.50.110.210', 9999)
import skelangle
kinect_angle = skelangle.AngleFromSkel()
try:
while True:
ax.clear()
draw_position(kinect_sensor.tracked_skeleton, ax)
plt.draw()
time.sleep(0.1)
except KeyboardInterrupt:
plt.close('all')
| gpl-3.0 |
gwparikh/cvgui | grouping_calibration.py | 2 | 9402 | #!/usr/bin/env python
import os, sys, subprocess
import argparse
import subprocess
import threading
import timeit
from multiprocessing import Queue, Lock
from configobj import ConfigObj
from numpy import loadtxt
from numpy.linalg import inv
import matplotlib.pyplot as plt
import moving
from cvguipy import trajstorage, cvgenetic, cvconfig
"""
Grouping Calibration By Genetic Algorithm.
This script uses a genetic algorithm to search for the best configuration.
It does not monitor RAM usage, so CPU thrashing may happen when the number of parents (selection size) is too large.
"""
# class for genetic algorithm
class GeneticCompare(object):
def __init__(self, motalist, motplist, IDlist, cfg_list, lock):
self.motalist = motalist
self.motplist = motplist
self.IDlist = IDlist
self.cfg_list = cfg_list
self.lock = lock
# This is used to calculate the fitness of an individual in the genetic algorithm.
# It is modified to create the sqlite and cfg files before running computeClearMOT.
# NOTE errors show up when loading two same ID
def computeMOT(self, i):
# create sqlite and cfg file with id i
cfg_name = config_files +str(i)+'.cfg'
sql_name = sqlite_files +str(i)+'.sqlite'
open(cfg_name,'w').close()
config = ConfigObj(cfg_name)
cfg_list.write_config(i ,config)
command = ['cp', 'tracking_only.sqlite', sql_name]
process = subprocess.Popen(command)
process.wait()
command = ['trajextract.py', args.inputVideo, '-o', args.homography, '-t', cfg_name, '-d', sql_name, '--gf']
# suppress output of grouping extraction
devnull = open(os.devnull, 'wb')
process = subprocess.Popen(command, stdout = devnull)
process.wait()
obj = trajstorage.CVsqlite(sql_name)
print "loading", i
obj.loadObjects()
motp, mota, mt, mme, fpt, gt = moving.computeClearMOT(cdb.annotations, obj.objects, args.matchDistance, firstFrame, lastFrame)
if motp is None:
motp = 0
self.lock.acquire()
self.IDlist.put(i)
self.motplist.put(motp)
self.motalist.put(mota)
obj.close()
if args.PrintMOTA:
print("ID: mota:{} motp:{}".format(mota, motp))
self.lock.release()
return mota
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description="compare all sqlites that are created by cfg_combination.py to the Annotated version to find the ID of the best configuration")
parser.add_argument('inputVideo', help= "input video filename")
parser.add_argument('-r', '--configuration-file', dest='range_cfg', help= "the configuration-file contain the range of configuration")
parser.add_argument('-t', '--traffintel-config', dest='traffintelConfig', help= "the TrafficIntelligence file to use for running the first extraction.")
parser.add_argument('-m', '--mask-File', dest='maskFilename', help="Name of the mask-File for trajextract")
parser.add_argument('-d', '--database-file', dest ='databaseFile', help ="Name of the databaseFile.")
parser.add_argument('-o', '--homography-file', dest ='homography', help = "Name of the homography file.", required = True)
parser.add_argument('-md', '--matching-distance', dest='matchDistance', help = "matchDistance", default = 10, type = float)
parser.add_argument('-a', '--accuracy', dest = 'accuracy', help = "accuracy parameter for genetic algorithm", type = int)
parser.add_argument('-p', '--population', dest = 'population', help = "population parameter for genetic algorithm", required = True, type = int)
parser.add_argument('-np', '--num-of-parents', dest = 'num_of_parents', help = "Number of parents that are selected each generation", type = int)
parser.add_argument('-mota', '--print-MOTA', dest='PrintMOTA', action = 'store_true', help = "Print MOTA for each ID.")
args = parser.parse_args()
os.mkdir('cfg_files')
os.mkdir('sql_files')
sqlite_files = "sql_files/Sqlite_ID_"
config_files = "cfg_files/Cfg_ID_"
# ------------------initialize annotated version if not existed ---------- #
# inputVideo check
if not os.path.exists(args.inputVideo):
print("Input video {} does not exist! Exiting...".format(args.inputVideo))
sys.exit(1)
# configuration file check
if args.range_cfg is None:
config = ConfigObj('range.cfg')
else:
config = ConfigObj(args.range_cfg)
# get configuration and put them to a List
cfg_list = cvconfig.CVConfigList()
thread_cfgtolist = threading.Thread(target = cvconfig.config_to_list, args = (cfg_list, config))
thread_cfgtolist.start();
# check if dbfile name is entered
if args.databaseFile is None:
print("Database-file is not entered, running trajextract and cvplayer.")
if not os.path.exists(args.homography):
print("Homography file does not exist! Exiting...")
sys.exit(1)
else:
videofile=args.inputVideo
if 'avi' in videofile:
if args.maskFilename is not None:
command = ['trajextract.py',args.inputVideo,'-m', args.maskFilename,'-o', args.homography]
else:
command = ['trajextract.py',args.inputVideo,'-o', args.homography]
process = subprocess.Popen(command)
process.wait()
databaseFile = videofile.replace('avi','sqlite')
command = ['cvplayer.py',args.inputVideo,'-d',databaseFile,'-o',args.homography]
process = subprocess.Popen(command)
process.wait()
else:
print("Input video {} is not 'avi' type. Exiting...".format(args.inputVideo))
sys.exit(1)
else:
databaseFile = args.databaseFile
thread_cfgtolist.join()
# ------------------Done initialization for annotation-------------------- #
# create first tracking only database template.
print("creating the first tracking only database template.")
if args.maskFilename is not None:
command = map(str, ['trajextract.py',args.inputVideo, '-d', 'tracking_only.sqlite', '-t', args.traffintelConfig, '-o', args.homography, '-m', args.maskFilename, '--tf'])
else:
command = map(str, ['trajextract.py',args.inputVideo, '-d', 'tracking_only.sqlite', '-t', args.traffintelConfig, '-o', args.homography, '--tf'])
process = subprocess.Popen(command)
process.wait()
# ----start using genetic algorithm to search for best configuration-------#
start = timeit.default_timer()
dbfile = databaseFile;
homography = loadtxt(args.homography)
cdb = trajstorage.CVsqlite(dbfile)
cdb.open()
cdb.getLatestAnnotation()
cdb.createBoundingBoxTable(cdb.latestannotations, inv(homography))
cdb.loadAnnotaion()
for a in cdb.annotations:
a.computeCentroidTrajectory(homography)
print "Latest Annotaions in "+dbfile+": ", cdb.latestannotations
cdb.frameNumbers = cdb.getFrameList()
firstFrame = cdb.frameNumbers[0]
lastFrame = cdb.frameNumbers[-1]
foundmota = Queue()
foundmotp = Queue()
IDs = Queue()
lock = Lock()
Comp = GeneticCompare(foundmota, foundmotp, IDs, cfg_list, lock)
if args.accuracy != None:
GeneticCal = cvgenetic.CVGenetic(args.population, cfg_list, Comp.computeMOT, args.accuracy)
else:
GeneticCal = cvgenetic.CVGenetic(args.population, cfg_list, Comp.computeMOT)
if args.num_of_parents != None:
GeneticCal.run_thread(args.num_of_parents)
else:
GeneticCal.run_thread()
# tranform queues to lists
foundmota = cvgenetic.Queue_to_list(foundmota)
foundmotp = cvgenetic.Queue_to_list(foundmotp)
IDs = cvgenetic.Queue_to_list(IDs)
for i in range(len(foundmotp)):
foundmotp[i] /= args.matchDistance
Best_mota = max(foundmota)
Best_ID = IDs[foundmota.index(Best_mota)]
print "Best multiple object tracking accuracy (MOTA)", Best_mota
print "ID:", Best_ID
stop = timeit.default_timer()
print str(stop-start) + "s"
total = []
for i in range(len(foundmota)):
total.append(foundmota[i]- 0.1 * foundmotp[i])
Best_total = max(total)
Best_total_ID = IDs[total.index(Best_total)]
# ------------------------------Done searching----------------------------#
# use matplot to plot a graph of all calculated IDs along with thier mota
plt.figure(1)
plt.plot(foundmota ,IDs ,'bo')
plt.plot(foundmotp ,IDs ,'yo')
plt.plot(Best_mota, Best_ID, 'ro')
plt.axis([-1, 1, -1, cfg_list.get_total_combination()])
plt.xlabel('mota')
plt.ylabel('ID')
plt.title(b'Best MOTA: '+str(Best_mota) +'\nwith ID: '+str(Best_ID))
plotFile = os.path.splitext(dbfile)[0] + '_CalibrationResult_mota.png'
plt.savefig(plotFile)
plt.figure(2)
plt.plot(total, IDs, 'bo')
plt.plot(Best_total, Best_total_ID, 'ro')
plt.xlabel('mota + motp')
plt.ylabel('ID')
plt.title(b'Best total: '+str(Best_total) +'\nwith ID: '+str(Best_total_ID))
# save the plot
plotFile = os.path.splitext(dbfile)[0] + '_CalibrationResult_motp.png'
plt.savefig(plotFile)
plt.show()
cdb.close()
| mit |
keflavich/pyspeckit-obsolete | pyspeckit/spectrum/models/ammonia.py | 1 | 28836 | """
========================================
Ammonia inversion transition TKIN fitter
========================================
Ammonia inversion transition TKIN fitter translated from Erik Rosolowsky's
http://svn.ok.ubc.ca/svn/signals/nh3fit/
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
Module API
^^^^^^^^^^
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from pyspeckit.spectrum.parinfo import ParinfoList,Parinfo
import fitter
import matplotlib.cbook as mpcb
import copy
import model
line_names = ['oneone','twotwo','threethree','fourfour']
freq_dict = {
'oneone': 23.694506e9,
'twotwo': 23.722633335e9,
'threethree': 23.8701296e9,
'fourfour': 24.1394169e9,
}
aval_dict = {
'oneone': 1.712e-7, #64*!pi**4/(3*h*c**3)*nu11**3*mu0**2*(1/2.)
'twotwo': 2.291e-7, #64*!pi**4/(3*h*c**3)*nu22**3*mu0**2*(2/3.)
'threethree': 2.625e-7, #64*!pi**4/(3*h*c**3)*nu33**3*mu0**2*(3/4.)
'fourfour': 3.167e-7, #64*!pi**4/(3*h*c**3)*nu44**3*mu0**2*(4/5.)
}
ortho_dict = {
'oneone': False,
'twotwo': False,
'threethree': True,
'fourfour': False,
}
n_ortho = np.arange(0,28,3) # 0..3..27
n_para = np.array([x for x in range(28) if x % 3 != 0])
voff_lines_dict = {
'oneone': [19.8513, 19.3159, 7.88669, 7.46967, 7.35132, 0.460409, 0.322042,
-0.0751680, -0.213003, 0.311034, 0.192266, -0.132382, -0.250923, -7.23349,
-7.37280, -7.81526, -19.4117, -19.5500],
'twotwo':[26.5263, 26.0111, 25.9505, 16.3917, 16.3793, 15.8642, 0.562503,
0.528408, 0.523745, 0.0132820, -0.00379100, -0.0132820, -0.501831,
-0.531340, -0.589080, -15.8547, -16.3698, -16.3822, -25.9505, -26.0111,
-26.5263],
'threethree':[29.195098, 29.044147, 28.941877, 28.911408, 21.234827,
21.214619, 21.136387, 21.087456, 1.005122, 0.806082, 0.778062,
0.628569, 0.016754, -0.005589, -0.013401, -0.639734, -0.744554,
-1.031924, -21.125222, -21.203441, -21.223649, -21.076291, -28.908067,
-28.938523, -29.040794, -29.191744],
'fourfour':[ 0. , -30.49783692, 30.49783692, 0., 24.25907811,
-24.25907811, 0. ]
}
tau_wts_dict = {
'oneone': [0.0740740, 0.148148, 0.0925930, 0.166667, 0.0185190, 0.0370370,
0.0185190, 0.0185190, 0.0925930, 0.0333330, 0.300000, 0.466667,
0.0333330, 0.0925930, 0.0185190, 0.166667, 0.0740740, 0.148148],
'twotwo': [0.00418600, 0.0376740, 0.0209300, 0.0372090, 0.0260470,
0.00186000, 0.0209300, 0.0116280, 0.0106310, 0.267442, 0.499668,
0.146512, 0.0116280, 0.0106310, 0.0209300, 0.00186000, 0.0260470,
0.0372090, 0.0209300, 0.0376740, 0.00418600],
'threethree': [0.012263, 0.008409, 0.003434, 0.005494, 0.006652, 0.008852,
0.004967, 0.011589, 0.019228, 0.010387, 0.010820, 0.009482, 0.293302,
0.459109, 0.177372, 0.009482, 0.010820, 0.019228, 0.004967, 0.008852,
0.006652, 0.011589, 0.005494, 0.003434, 0.008409, 0.012263],
'fourfour': [0.2431, 0.0162, 0.0162, 0.3008, 0.0163, 0.0163, 0.3911]}
def ammonia(xarr, tkin=20, tex=None, ntot=1e14, width=1,
xoff_v=0.0, fortho=0.0, tau=None, fillingfraction=None, return_tau=False,
thin=False, verbose=False, return_components=False, debug=False ):
"""
Generate a model Ammonia spectrum based on input temperatures, column, and
gaussian parameters
ntot can be specified as a column density (e.g., 10^15) or a log-column-density (e.g., 15)
tex can be specified; it is set equal to tkin (LTE) if it is unspecified, if tex>tkin, or if "thin"
is specified
"thin" uses a different parametrization and requires only the optical depth, width, offset,
and tkin to be specified. In the 'thin' approximation, tex is not used in computation of
the partition function - LTE is implicitly assumed
If tau is specified, ntot is NOT fit but is set to a fixed value
fillingfraction is an arbitrary scaling factor to apply to the model
fortho is the ortho/(ortho+para) fraction. The default (0.0) assumes all para.
xoff_v is the velocity offset in km/s
tau refers to the optical depth of the 1-1 line. The optical depths of the
other lines are fixed relative to tau_oneone
(not implemented) if tau is specified, ntot is ignored
"""
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('GHz')
if tex is not None:
if tex > tkin: # cannot have Tex > Tkin
tex = tkin
elif thin: # tex is not used in this case
tex = tkin
else:
tex = tkin
if thin:
ntot = 1e15
elif 5 < ntot < 25:
# allow ntot to be specified as a logarithm. This is
# safe because ntot < 1e10 gives a spectrum of all zeros, and the
# plausible range of columns is not outside the specified range
ntot = 10**ntot
elif (25 < ntot < 1e5) or (ntot < 5):
# these are totally invalid for log/non-log
return 0
# fillingfraction is an arbitrary scaling for the data
# The model will be (normal model) * fillingfraction
if fillingfraction is None:
fillingfraction = 1.0
ckms = 2.99792458e5
ccms = ckms*1e5
g1 = 1
g2 = 1
h = 6.6260693e-27
kb = 1.3806505e-16
mu0 = 1.476e-18 # Dipole Moment in cgs (1.476 Debye)
# Generate Partition Functions
nlevs = 51
jv=np.arange(nlevs)
ortho = jv % 3 == 0
para = ~ortho  # logical not of the ortho mask
Jpara = jv[para]
Jortho = jv[ortho]
Brot = 298117.06e6
Crot = 186726.36e6
runspec = np.zeros(len(xarr))
tau_dict = {}
para_count = 0
ortho_count = 1 # ignore 0-0
if tau is not None and thin:
"""
Use optical depth in the 1-1 line as a free parameter
The optical depths of the other lines are then set by the kinetic temperature
Tex is still a free parameter in the final spectrum calculation at the bottom
(technically, I think this process assumes LTE; Tex should come into play in
these equations, not just the final one)
"""
dT0 = 41.5 # Energy diff between (2,2) and (1,1) in K
trot = tkin/(1+tkin/dT0*np.log(1+0.6*np.exp(-15.7/tkin)))
tau_dict['oneone'] = tau
tau_dict['twotwo'] = tau*(23.722/23.694)**2*4/3.*5/3.*np.exp(-41.5/trot)
tau_dict['threethree'] = tau*(23.8701279/23.694)**2*3/2.*14./3.*np.exp(-101.1/trot)
tau_dict['fourfour'] = tau*(24.1394169/23.694)**2*8/5.*9/3.*np.exp(-177.34/trot)
else:
"""
Column density is the free parameter. It is used in conjunction with
the full partition function to compute the optical depth in each band
Given the complexity of these equations, it would be worth my while to
comment each step carefully.
"""
Zpara = (2*Jpara+1)*np.exp(-h*(Brot*Jpara*(Jpara+1)+
(Crot-Brot)*Jpara**2)/(kb*tkin))
Zortho = 2*(2*Jortho+1)*np.exp(-h*(Brot*Jortho*(Jortho+1)+
(Crot-Brot)*Jortho**2)/(kb*tkin))
for linename in line_names:
if ortho_dict[linename]:
orthoparafrac = fortho
Z = Zortho
count = ortho_count
ortho_count += 1
else:
orthoparafrac = 1.0-fortho
Z = Zpara
count = para_count # need to treat partition function separately
para_count += 1
tau_dict[linename] = (ntot * orthoparafrac * Z[count]/(Z.sum()) / ( 1
+ np.exp(-h*freq_dict[linename]/(kb*tkin) )) * ccms**2 /
(8*np.pi*freq_dict[linename]**2) * aval_dict[linename]*
(1-np.exp(-h*freq_dict[linename]/(kb*tex))) /
(width/ckms*freq_dict[linename]*np.sqrt(2*np.pi)) )
# allow tau(11) to be specified instead of ntot
# in the thin case, this is not needed: ntot plays no role
# this process allows you to specify tau without using the approximate equations specified
# above. It should remove ntot from the calculations anyway...
if tau is not None and not thin:
tau11_temp = tau_dict['oneone']
# re-scale all optical depths so that tau is as specified, but the relative taus
# are set by the kinetic temperature and partition functions
for linename,t in tau_dict.iteritems():
tau_dict[linename] = t * tau/tau11_temp
components =[]
for linename in line_names:
voff_lines = np.array(voff_lines_dict[linename])
tau_wts = np.array(tau_wts_dict[linename])
lines = (1-voff_lines/ckms)*freq_dict[linename]/1e9
tau_wts = tau_wts / (tau_wts).sum()
nuwidth = np.abs(width/ckms*lines)
nuoff = xoff_v/ckms*lines
# tau array
tauprof = np.zeros(len(xarr))
for kk,no in enumerate(nuoff):
tauprof += (tau_dict[linename] * tau_wts[kk] *
np.exp(-(xarr+no-lines[kk])**2 / (2.0*nuwidth[kk]**2)) *
fillingfraction)
components.append( tauprof )
T0 = (h*xarr*1e9/kb) # "temperature" of wavelength
if tau is not None and thin:
#runspec = tauprof+runspec
# is there ever a case where you want to ignore the optical depth function? I think no
runspec = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/2.73)-1))*(1-np.exp(-tauprof))+runspec
else:
runspec = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/2.73)-1))*(1-np.exp(-tauprof))+runspec
if runspec.min() < 0:
raise ValueError("Model dropped below zero. That is not possible normally. Here are the input values: "+
("tex: %f " % tex) +
("tkin: %f " % tkin) +
("ntot: %f " % ntot) +
("width: %f " % width) +
("xoff_v: %f " % xoff_v) +
("fortho: %f " % fortho)
)
if verbose or debug:
print "tkin: %g tex: %g ntot: %g width: %g xoff_v: %g fortho: %g fillingfraction: %g" % (tkin,tex,ntot,width,xoff_v,fortho,fillingfraction)
if return_components:
return (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/2.73)-1))*(1-np.exp(-1*np.array(components)))
if return_tau:
return tau_dict
return runspec
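# Example usage (a sketch; values are illustrative and `xarr` is assumed to be
# a pyspeckit SpectroscopicAxis covering the NH3 inversion lines):
#
#     spec = ammonia(xarr, tkin=20., tex=6., ntot=14.5, width=0.5,
#                    xoff_v=0.0, fortho=0.5)
#
# Here ntot=14.5 is read as log10 of the column density (see the docstring);
# passing tau=... with thin=True instead works directly with the (1,1)
# optical depth.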
class ammonia_model(model.SpectralModel):
def __init__(self,npeaks=1,npars=6,multisingle='multi',**kwargs):
self.npeaks = npeaks
self.npars = npars
self._default_parnames = ['tkin','tex','ntot','width','xoff_v','fortho']
self.parnames = copy.copy(self._default_parnames)
# all fitters must have declared modelfuncs, which should take the fitted pars...
self.modelfunc = ammonia
self.n_modelfunc = self.n_ammonia
# for fitting ammonia simultaneously with a flat background
self.onepeakammonia = fitter.vheightmodel(ammonia)
#self.onepeakammoniafit = self._fourparfitter(self.onepeakammonia)
if multisingle in ('multi','single'):
self.multisingle = multisingle
else:
raise Exception("multisingle must be multi or single")
self.default_parinfo = None
self.default_parinfo, kwargs = self._make_parinfo(**kwargs)
# enforce ammonia-specific parameter limits
for par in self.default_parinfo:
if 'tex' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],2.73), par.limits[1])
if 'tkin' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],2.73), par.limits[1])
if 'width' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],0), par.limits[1])
if 'fortho' in par.parname.lower():
par.limited = (True,True)
if par.limits[1] != 0:
par.limits = (max(par.limits[0],0), min(par.limits[1],1))
else:
par.limits = (max(par.limits[0],0), 1)
if 'ntot' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],0), par.limits[1])
self.parinfo = copy.copy(self.default_parinfo)
self.modelfunc_kwargs = kwargs
# lower case? self.modelfunc_kwargs.update({'parnames':self.parinfo.parnames})
def __call__(self,*args,**kwargs):
#if 'use_lmfit' in kwargs: kwargs.pop('use_lmfit')
use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else self.use_lmfit
if use_lmfit:
return self.lmfitter(*args,**kwargs)
if self.multisingle == 'single':
return self.onepeakammoniafit(*args,**kwargs)
elif self.multisingle == 'multi':
return self.multinh3fit(*args,**kwargs)
def n_ammonia(self, pars=None, parnames=None, **kwargs):
"""
Returns a function that sums over N ammonia line profiles, where N is the length of
tkin,tex,ntot,width,xoff_v,fortho *OR* N = len(pars) / 6
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
*pars* [ list ]
a list with len(pars) = (6-nfixed)n, assuming
tkin,tex,ntot,width,xoff_v,fortho repeated
*parnames* [ list ]
len(parnames) must = len(pars). parnames determine how the ammonia
function parses the arguments
"""
if hasattr(pars,'values'):
# important to treat as Dictionary, since lmfit params & parinfo both have .items
parnames,parvals = zip(*pars.items())
parnames = [p.lower() for p in parnames]
parvals = [p.value for p in parvals]
elif parnames is None:
parvals = pars
parnames = self.parnames
else:
parvals = pars
if len(pars) != len(parnames):
# this should only be needed when other codes are changing the number of peaks
# during a copy, as opposed to letting them be set by a __call__
# (n_modelfuncs = n_ammonia can be called directly)
# n_modelfuncs doesn't care how many peaks there are
if len(pars) % len(parnames) == 0:
parnames = [p for ii in range(len(pars)/len(parnames)) for p in parnames]
npars = len(parvals) / self.npeaks
else:
raise ValueError("Wrong array lengths passed to n_ammonia!")
else:
npars = len(parvals) / self.npeaks
self._components = []
def L(x):
v = np.zeros(len(x))
for jj in xrange(self.npeaks):
modelkwargs = kwargs.copy()
for ii in xrange(npars):
name = parnames[ii+jj*npars].strip('0123456789').lower()
modelkwargs.update({name:parvals[ii+jj*npars]})
v += ammonia(x,**modelkwargs)
return v
return L
def components(self, xarr, pars, hyperfine=False):
"""
Ammonia components don't follow the default, since in Galactic astronomy the hyperfine components should be well-separated.
If you want to see the individual components overlaid, you'll need to pass hyperfine to the plot_fit call
"""
comps=[]
for ii in xrange(self.npeaks):
if hyperfine:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))
comps.append( ammonia(xarr,return_components=True,**modelkwargs) )
else:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))
comps.append( [ammonia(xarr,return_components=False,**modelkwargs)] )
modelcomponents = np.concatenate(comps)
return modelcomponents
def multinh3fit(self, xax, data, npeaks=1, err=None,
params=(20,20,14,1.0,0.0,0.5),
parnames=None,
fixed=(False,False,False,False,False,False),
limitedmin=(True,True,True,True,False,True),
limitedmax=(False,False,False,False,False,True), minpars=(2.73,2.73,0,0,0,0),
parinfo=None,
maxpars=(0,0,0,0,0,1), quiet=True, shh=True, veryverbose=False, **kwargs):
"""
Fit multiple nh3 profiles (multiple can be 1)
Inputs:
xax - x axis
data - y axis
npeaks - How many nh3 profiles to fit? Default 1 (this could supersede onedgaussfit)
err - error corresponding to data
These parameters need to have length = 6*npeaks. If npeaks > 1 and length = 6, they will
be replicated npeaks times, otherwise they will be reset to defaults:
params - Fit parameters: [tkin, tex, ntot (or tau), width, offset, ortho fraction] * npeaks
If len(params) % 6 == 0, npeaks will be set to len(params) / 6
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0, Tex and Tkin > Tcmb)
limitedmax/maxpars - set upper limits on each parameter
parnames - default parameter names, important for setting kwargs in model ['tkin','tex','ntot','width','xoff_v','fortho']
quiet - should MPFIT output each iteration?
shh - output final parameters?
Returns:
Fit parameters
Model
Fit errors
chi2
"""
if parinfo is None:
self.npars = len(params) / npeaks
if len(params) != npeaks and (len(params) / self.npars) > npeaks:
npeaks = len(params) / self.npars
self.npeaks = npeaks
if isinstance(params,np.ndarray): params=params.tolist()
# this is actually a hack, even though it's decently elegant
# somehow, parnames was being changed WITHOUT being passed as a variable
# this doesn't make sense - at all - but it happened.
# (it is possible for self.parnames to have npars*npeaks elements where
# npeaks > 1 coming into this function even though only 6 pars are specified;
# _default_parnames is the workaround)
if parnames is None: parnames = copy.copy(self._default_parnames)
partype_dict = dict(zip(['params','parnames','fixed','limitedmin','limitedmax','minpars','maxpars'],
[params,parnames,fixed,limitedmin,limitedmax,minpars,maxpars]))
# make sure all various things are the right length; if they're not, fix them using the defaults
for partype,parlist in partype_dict.iteritems():
if len(parlist) != self.npars*self.npeaks:
# if you leave the defaults, or enter something that can be multiplied by npars to get to the
# right number of gaussians, it will just replicate
if len(parlist) == self.npars:
partype_dict[partype] *= npeaks
elif len(parlist) > self.npars:
# DANGER: THIS SHOULD NOT HAPPEN!
print "WARNING! Input parameters were longer than allowed for variable ",parlist
partype_dict[partype] = partype_dict[partype][:self.npars]
elif parlist==params: # this instance shouldn't really be possible
partype_dict[partype] = [20,20,1e10,1.0,0.0,0.5] * npeaks
elif parlist==fixed:
partype_dict[partype] = [False] * len(params)
elif parlist==limitedmax: # only fortho, fillingfraction have upper limits
partype_dict[partype] = (np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')
elif parlist==limitedmin: # no physical values can be negative except velocity
partype_dict[partype] = (np.array(parnames) != 'xoff_v')
elif parlist==minpars: # all have minima of zero except kinetic temperature, which can't be below CMB. Excitation temperature technically can be, but not in this model
partype_dict[partype] = ((np.array(parnames) == 'tkin') + (np.array(parnames) == 'tex')) * 2.73
elif parlist==maxpars: # fractions have upper limits of 1.0
partype_dict[partype] = ((np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')).astype('float')
elif parlist==parnames: # assumes the right number of parnames (essential)
partype_dict[partype] = list(parnames) * self.npeaks
if len(parnames) != len(partype_dict['params']):
raise ValueError("Wrong array lengths AFTER fixing them")
# used in components. Is this just a hack?
self.parnames = partype_dict['parnames']
parinfo = [ {'n':ii, 'value':partype_dict['params'][ii],
'limits':[partype_dict['minpars'][ii],partype_dict['maxpars'][ii]],
'limited':[partype_dict['limitedmin'][ii],partype_dict['limitedmax'][ii]], 'fixed':partype_dict['fixed'][ii],
'parname':partype_dict['parnames'][ii]+str(ii/self.npars),
'mpmaxstep':float(partype_dict['parnames'][ii] in ('tex','tkin')), # must force small steps in temperature (True = 1.0)
'error': 0}
for ii in xrange(len(partype_dict['params'])) ]
# hack: remove 'fixed' pars
parinfo_with_fixed = parinfo
parinfo = [p for p in parinfo_with_fixed if not p['fixed']]
fixed_kwargs = dict((p['parname'].strip("0123456789").lower(),p['value']) for p in parinfo_with_fixed if p['fixed'])
# don't do this - it breaks the NEXT call because npars != len(parnames) self.parnames = [p['parname'] for p in parinfo]
# this is OK - not a permanent change
parnames = [p['parname'] for p in parinfo]
# not OK self.npars = len(parinfo)/self.npeaks
parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
#import pdb; pdb.set_trace()
else:
self.parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
parinfo_with_fixed = None
fixed_kwargs = {}
fitfun_kwargs = dict(kwargs.items()+fixed_kwargs.items())
npars = len(parinfo)/self.npeaks
# (fortho0 is not fortho)
# this doesn't work if parinfo_with_fixed is not None:
# this doesn't work for p in parinfo_with_fixed:
# this doesn't work # users can change the defaults while holding them fixed
# this doesn't work if p['fixed']:
# this doesn't work kwargs.update({p['parname']:p['value']})
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, parnames=parinfo.parnames, **fitfun_kwargs)(x))]
else:
def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, parnames=parinfo.parnames, **fitfun_kwargs)(x))/err]
return f
if veryverbose:
print "GUESSES: "
print "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
parinfo[i]['error'] = mpperr[i]
if not shh:
print "Fit status: ",mp.status
print "Fit message: ",mp.errmsg
print "Final fit values: "
for i,p in enumerate(mpp):
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
if any(['tex' in s for s in parnames]) and any(['tkin' in s for s in parnames]):
texnum = (i for i,s in enumerate(parnames) if 'tex' in s)
tkinnum = (i for i,s in enumerate(parnames) if 'tkin' in s)
for txn,tkn in zip(texnum,tkinnum):
if mpp[txn] > mpp[tkn]: mpp[txn] = mpp[tkn] # force Tex>Tkin to Tex=Tkin (already done in n_ammonia)
self.mp = mp
if parinfo_with_fixed is not None:
# self self.parinfo preserving the 'fixed' parameters
# ORDER MATTERS!
for p in parinfo:
parinfo_with_fixed[p['n']] = p
self.parinfo = ParinfoList([Parinfo(p) for p in parinfo_with_fixed], preserve_order=True)
else:
self.parinfo = parinfo
self.parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
# I don't THINK these are necessary?
#self.parinfo = parinfo
#self.parinfo = ParinfoList([Parinfo(p) for p in self.parinfo])
# need to restore the fixed parameters....
# though the above commented out section indicates that I've done and undone this dozens of times now
# (a test has been added to test_nh3.py)
# this was NEVER included or tested because it breaks the order
#for par in parinfo_with_fixed:
# if par.parname not in self.parinfo.keys():
# self.parinfo.append(par)
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
self.model = self.n_ammonia(pars=self.mpp, parnames=self.mppnames, **kwargs)(xax)
#if self.model.sum() == 0:
# print "DON'T FORGET TO REMOVE THIS ERROR!"
# raise ValueError("Model is zeros.")
indiv_parinfo = [self.parinfo[jj*self.npars:(jj+1)*self.npars] for jj in xrange(len(self.parinfo)/self.npars)]
modelkwargs = [
dict([(p['parname'].strip("0123456789").lower(),p['value']) for p in pi])
for pi in indiv_parinfo]
self.tau_list = [ammonia(xax,return_tau=True,**mk) for mk in modelkwargs]
return self.mpp,self.model,self.mpperr,chi2
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# TKIN, TEX, ntot, width, center, ortho fraction
return [20,10, 1e15, 1.0, 0.0, 1.0]
def annotations(self):
from decimal import Decimal # for formatting
tex_key = {'tkin':'T_K','tex':'T_{ex}','ntot':'N','fortho':'F_o','width':'\\sigma','xoff_v':'v','fillingfraction':'FF','tau':'\\tau_{1-1}'}
# small hack below: don't quantize if error > value. We want to see the values.
label_list = []
for pinfo in self.parinfo:
parname = tex_key[pinfo['parname'].strip("0123456789").lower()]
parnum = int(pinfo['parname'][-1])
if pinfo['fixed']:
formatted_value = "%s" % pinfo['value']
pm = ""
formatted_error=""
else:
formatted_value = Decimal("%g" % pinfo['value']).quantize(Decimal("%0.2g" % (min(pinfo['error'],pinfo['value']))))
pm = "$\\pm$"
formatted_error = Decimal("%g" % pinfo['error']).quantize(Decimal("%0.2g" % pinfo['error']))
label = "$%s(%i)$=%8s %s %8s" % (parname, parnum, formatted_value, pm, formatted_error)
label_list.append(label)
labels = tuple(mpcb.flatten(label_list))
return labels
class ammonia_model_vtau(ammonia_model):
def __init__(self,**kwargs):
super(ammonia_model_vtau,self).__init__()
self.parnames = ['tkin','tex','tau','width','xoff_v','fortho']
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# TKIN, TEX, ntot, width, center, ortho fraction
return [20,10, 1, 1.0, 0.0, 1.0]
def __call__(self,*args,**kwargs):
if self.multisingle == 'single':
return self.onepeakammoniafit(*args,**kwargs)
elif self.multisingle == 'multi':
return self.multinh3fit(*args,**kwargs)
| mit |
jakevdp/seaborn | doc/sphinxext/ipython_directive.py | 37 | 37557 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
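Several of these options can be set together in ``conf.py``; the values below
are illustrative (most simply restate the defaults described above)::
ipython_promptin = 'In [%d]:'
ipython_promptout = 'Out [%d]:'
ipython_mplbackend = 'agg'
ipython_execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
ipython_holdcount = True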
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold: refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
# default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbeddedSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
        # based on the specified block options, defaulting to ['utf8'].
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
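    # These values can be overridden from a project's Sphinx conf.py, for
    # example (hypothetical):
    #     ipython_mplbackend = 'agg'
    #     ipython_execlines = ['import numpy as np', 'import pandas as pd']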
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
    # skip the first example, which depends on a local file:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| bsd-3-clause |
INCF/BIDS2ISATab | setup.py | 1 | 2176 | from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
setup(
name="BIDS2ISATab",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.1.0',
description="Command line tool generating ISA-Tab compatible description from a Brain Imaging Data Structure "
"compatible dataset.",
long_description="Command line tool generating ISA-Tab compatible description from a Brain Imaging Data Structure "
"compatible dataset.",
# The project URL.
url='https://github.com/INCF/BIDS2ISATab',
# Choose your license
license='BSD',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='bids isatab',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=["bids2isatab"],
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ["future",
"pandas",
'nibabel'],
include_package_data=True,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'bids2isatab=bids2isatab.main:main',
],
},
)
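# Once the package is installed (e.g. "pip install ."), the console_scripts
# entry point above exposes a "bids2isatab" command on the command line.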
| apache-2.0 |
zooniverse/aggregation | experimental/clusteringAlg/adaptiveDBSCAN.py | 2 | 4734 | #!/usr/bin/env python
__author__ = 'greg'
from sklearn.cluster import DBSCAN
import numpy as np
import math
def dist(c1,c2):
return math.sqrt((c1[0]-c2[0])**2 + (c1[1]-c2[1])**2)
class CannotSplit(Exception):
def __init__(self,value):
self.value = value
def __str__(self):
return ""
samples_needed = 3
def adaptiveDBSCAN(XYpts,user_ids):
if XYpts == []:
return []
pts_in_each_cluster = []
users_in_each_cluster = []
cluster_centers = []
#increase the epsilon until we don't have any nearby clusters corresponding to non-overlapping
#sets of users
X = np.array(XYpts)
#for epsilon in [5,10,15,20,25,30]:
for first_epsilon in [100,200,300,400]:
db = DBSCAN(eps=first_epsilon, min_samples=samples_needed).fit(X)
labels = db.labels_
pts_in_each_cluster = []
users_in_each_cluster = []
cluster_centers = []
for k in sorted(set(labels)):
if k == -1:
continue
class_member_mask = (labels == k)
pts_in_cluster = list(X[class_member_mask])
xSet,ySet = zip(*pts_in_cluster)
cluster_centers.append((np.mean(xSet),np.mean(ySet)))
pts_in_each_cluster.append(pts_in_cluster[:])
users_in_each_cluster.append([u for u,l in zip(user_ids,labels) if l == k])
#do we have any adjacent clusters with non-overlapping sets of users
#if so, we should merge them by increasing the epsilon value
cluster_compare = []
        for cluster_index, (c1,users) in enumerate(zip(cluster_centers,users_in_each_cluster)):
            for other_index, (c2,users2) in enumerate(zip(cluster_centers[cluster_index+1:],users_in_each_cluster[cluster_index+1:])):
overlappingUsers = [u for u in users if u in users2]
cluster_compare.append((dist(c1,c2),overlappingUsers))
cluster_compare.sort(key = lambda x:x[0])
needToMerge = [] in [c[1] for c in cluster_compare[:10]]
if not(needToMerge):
break
#print epsilon
#print [c[1] for c in cluster_compare[:10]]
centers_to_return = []
assert not(needToMerge)
#do we need to split any clusters?
for cluster_index in range(len(cluster_centers)):
#print "splitting"
needToSplit = (sorted(users_in_each_cluster[cluster_index]) != sorted(list(set(users_in_each_cluster[cluster_index]))))
if needToSplit:
subcluster_centers = []
stillToSplit = []
X = np.array(pts_in_each_cluster[cluster_index])
#for epsilon in [30,25,20,15,10,5,1,0.1,0.01]:
for second_epsilon in range(200,1,-2):#[400,300,200,100,80,75,65,60,50,25,24,23,22,21,20,19,18,17,16,15,14,13,10,5,1]:
db = DBSCAN(eps=second_epsilon, min_samples=samples_needed).fit(X)
labels = db.labels_
subcluster_centers = []
needToSplit = False
for k in sorted(set(labels)):
if k == -1:
continue
class_member_mask = (labels == k)
users_in_subcluster = [u for u,l in zip(users_in_each_cluster[cluster_index],labels) if l == k]
needToSplit = (sorted(users_in_subcluster) != sorted(list(set(users_in_subcluster))))
if needToSplit:
stillToSplit = list(X[class_member_mask])
break
pts_in_cluster = list(X[class_member_mask])
xSet,ySet = zip(*pts_in_cluster)
subcluster_centers.append((np.mean(xSet),np.mean(ySet)))
if not(needToSplit):
break
if needToSplit:
print "second is " + str(second_epsilon)
print stillToSplit
for i in range(len(stillToSplit)):
p1 = stillToSplit[i]
for j in range(len(stillToSplit[i+1:])):
p2 = stillToSplit[j+i+1]
print math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2),
#print (i,j+i+1),
print
print X
print users_in_each_cluster[cluster_index]
raise CannotSplit(pts_in_each_cluster[cluster_index])
centers_to_return.extend(subcluster_centers)
#if needToSplit:
# print pts_in_each_cluster[cluster_index]
# print users_in_each_cluster[cluster_index]
#else:
else:
centers_to_return.append(cluster_centers[cluster_index])
return centers_to_return | apache-2.0 |
jrleja/bsfh | misc/timings_pyfsps.py | 3 | 4274 | #compare a lookup table of spectra at ages and metallicities to
#calls to fsps.sps.get_spectrum() for different metallicities
import time, os, subprocess, re, sys
import numpy as np
#import matplotlib.pyplot as pl
import fsps
from prospect import sources as sps_basis
from prospect.models import sedmodel
def run_command(cmd):
"""
Open a child process, and return its exit status and stdout.
"""
child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out = [s for s in child.stdout]
w = child.wait()
return os.WEXITSTATUS(w), out
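# Hypothetical usage: run_command("svnversion /path/to/sps") would return a
# tuple like (0, ['12345\n']) -- the exit status and the captured stdout lines.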
# Check to make sure that the required environment variable is present.
try:
ev = os.environ["SPS_HOME"]
except KeyError:
raise ImportError("You need to have the SPS_HOME environment variable")
# Check the SVN revision number.
cmd = ["svnversion", ev]
stat, out = run_command(" ".join(cmd))
fsps_vers = int(re.match("^([0-9])+", out[0]).group(0))
sps = fsps.StellarPopulation(zcontinuous=True)
print('FSPS version = {}'.format(fsps_vers))
print('Zs={0}, N_lambda={1}'.format(sps.zlegend, len(sps.wavelengths)))
print('single age')
def spec_from_fsps(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
wave, spec = sps.get_spectrum(peraa=True, tage = sps.params['tage'])
#print(spec.shape)
return time.time()-t0
def mags_from_fsps(z, t, s):
t0 = time.time()
sps.params['zred']=t
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
mags = sps.get_mags(tage = sps.params['tage'], redshift=0.0)
#print(spec.shape)
return time.time()-t0
def spec_from_ztinterp(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
sps.params['imf3'] = s
spec, m, l = sps.ztinterp(sps.params['logzsol'], sps.params['tage'], peraa=True)
#print(spec.shape)
return time.time()-t0
if sys.argv[1] == 'mags':
from_fsps = mags_from_fsps
print('timing get_mags')
print('nbands = {}'.format(len(sps.get_mags(tage=1.0))))
elif sys.argv[1] == 'spec':
from_fsps = spec_from_fsps
print('timing get_spectrum')
elif sys.argv[1] == 'ztinterp':
from_fsps = spec_from_ztinterp
print('timing get_spectrum')
elif sys.argv[1] == 'sedpy':
from sedpy import observate
nbands = len(sps.get_mags(tage=1.0))
fnames = nbands * ['sdss_r0']
filters = observate.load_filters(fnames)
def mags_from_sedpy(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
wave, spec = sps.get_spectrum(peraa=True,
tage = sps.params['tage'])
mags = observate.getSED(wave, spec, filters)
return time.time()-t0
from_fsps = mags_from_sedpy
sps.params['add_neb_emission'] = False
sps.params['smooth_velocity'] = True
sps.params['sfh'] = 0
ntry = 30
zz = np.random.uniform(-1,0,ntry)
tt = np.random.uniform(0.1,4,ntry)
ss = np.random.uniform(1,2.5,ntry)
#make sure all z's already compiled
_ =[from_fsps(z, 1.0, 0.0) for z in [-1, -0.8, -0.6, -0.4, -0.2, 0.0]]
all_dur = []
print('no neb emission:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], ss[i])
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
print('no neb emission, no smooth:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], 0.0)
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
sps.params['add_neb_emission'] = True
print('neb emission:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], ss[i])
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
print('neb emission, no smooth:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], 0.0)
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
| mit |
ClinicalGraphics/scikit-image | doc/examples/xx_applications/plot_morphology.py | 6 | 8329 | """
=======================
Morphological Filtering
=======================
Morphological image processing is a collection of non-linear operations related
to the shape or morphology of features in an image, such as boundaries,
skeletons, etc. In any given technique, we probe an image with a small shape or
template called a structuring element, which defines the region of interest or
neighborhood around a pixel.
In this document we outline the following basic morphological operations:
1. Erosion
2. Dilation
3. Opening
4. Closing
5. White Tophat
6. Black Tophat
7. Skeletonize
8. Convex Hull
To get started, let's load an image using ``io.imread``. Note that morphology
functions only work on gray-scale or binary images, so we set ``as_grey=True``.
"""
import matplotlib.pyplot as plt
from skimage.data import data_dir
from skimage.util import img_as_ubyte
from skimage import io
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
fig, ax = plt.subplots()
ax.imshow(phantom, cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure
Let's also define a convenience function for plotting comparisons:
"""
def plot_comparison(original, filtered, filter_name):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True)
ax1.imshow(original, cmap=plt.cm.gray)
ax1.set_title('original')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(filtered, cmap=plt.cm.gray)
ax2.set_title(filter_name)
ax2.axis('off')
ax2.set_adjustable('box-forced')
"""
Erosion
=======
Morphological ``erosion`` sets a pixel at (i, j) to the *minimum over all
pixels in the neighborhood centered at (i, j)*. The structuring element,
``selem``, passed to ``erosion`` is a boolean array that describes this
neighborhood. Below, we use ``disk`` to create a circular structuring element,
which we use for most of the following examples.
"""
from skimage.morphology import erosion, dilation, opening, closing, white_tophat
from skimage.morphology import black_tophat, skeletonize, convex_hull_image
from skimage.morphology import disk
selem = disk(6)
eroded = erosion(phantom, selem)
plot_comparison(phantom, eroded, 'erosion')
"""
.. image:: PLOT2RST.current_figure
Notice how the white boundary of the image disappears or gets eroded as we
increase the size of the disk. Also notice the increase in size of the two
black ellipses in the center and the disappearance of the 3 light grey
patches in the lower part of the image.
Dilation
========
Morphological ``dilation`` sets a pixel at (i, j) to the *maximum over all
pixels in the neighborhood centered at (i, j)*. Dilation enlarges bright
regions and shrinks dark regions.
"""
dilated = dilation(phantom, selem)
plot_comparison(phantom, dilated, 'dilation')
"""
.. image:: PLOT2RST.current_figure
Notice how the white boundary of the image thickens, or gets dilated, as we
increase the size of the disk. Also notice the decrease in size of the two
black ellipses in the center, and the thickening of the light grey circle in
the center and the 3 patches in the lower part of the image.
Opening
=======
Morphological ``opening`` on an image is defined as an *erosion followed by a
dilation*. Opening can remove small bright spots (i.e. "salt") and connect
small dark cracks.
"""
opened = opening(phantom, selem)
plot_comparison(phantom, opened, 'opening')
"""
.. image:: PLOT2RST.current_figure
Since ``opening`` an image starts with an erosion operation, light regions that
are *smaller* than the structuring element are removed. The dilation operation
that follows ensures that light regions that are *larger* than the structuring
element retain their original size. Notice how the light and dark shapes in the
center retain their original thickness but the 3 lighter patches in the bottom get
completely eroded. The size dependence is highlighted by the outer white ring:
The parts of the ring thinner than the structuring element were completely
erased, while the thicker region at the top retains its original thickness.
Closing
=======
Morphological ``closing`` on an image is defined as a *dilation followed by an
erosion*. Closing can remove small dark spots (i.e. "pepper") and connect
small bright cracks.
To illustrate this more clearly, let's add a small crack to the white border:
"""
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
phantom[10:30, 200:210] = 0
closed = closing(phantom, selem)
plot_comparison(phantom, closed, 'closing')
"""
.. image:: PLOT2RST.current_figure
Since ``closing`` an image starts with a dilation operation, dark regions
that are *smaller* than the structuring element are removed. The dilation
operation that follows ensures that dark regions that are *larger* than the
structuring element retain their original size. Notice how the white ellipses
at the bottom get connected because of dilation, but other dark regions retain
their original sizes. Also notice how the crack we added is mostly removed.
White tophat
============
The ``white_tophat`` of an image is defined as the *image minus its
morphological opening*. This operation returns the bright spots of the image
that are smaller than the structuring element.
To make things interesting, we'll add bright and dark spots to the image:
"""
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
phantom[340:350, 200:210] = 255
phantom[100:110, 200:210] = 0
w_tophat = white_tophat(phantom, selem)
plot_comparison(phantom, w_tophat, 'white tophat')
"""
.. image:: PLOT2RST.current_figure
As you can see, the 10-pixel wide white square is highlighted since it is
smaller than the structuring element. Also, the thin, white edges around most
of the ellipse are retained because they're smaller than the structuring
element, but the thicker region at the top disappears.
Black tophat
============
The ``black_tophat`` of an image is defined as its morphological **closing
minus the original image**. This operation returns the *dark spots of the
image that are smaller than the structuring element*.
"""
b_tophat = black_tophat(phantom, selem)
plot_comparison(phantom, b_tophat, 'black tophat')
"""
.. image:: PLOT2RST.current_figure
As you can see, the 10-pixel wide black square is highlighted since it is
smaller than the structuring element.
Duality
-------
As you should have noticed, many of these operations are simply the reverse
of another operation. This duality can be summarized as follows:
1. Erosion <-> Dilation
2. Opening <-> Closing
3. White tophat <-> Black tophat
Skeletonize
===========
Thinning is used to reduce each connected component in a binary image to a
*single-pixel wide skeleton*. It is important to note that this is performed
on binary images only.
"""
from skimage import img_as_bool
horse = ~img_as_bool(io.imread(data_dir+'/horse.png', as_grey=True))
sk = skeletonize(horse)
plot_comparison(horse, sk, 'skeletonize')
"""
.. image:: PLOT2RST.current_figure
As the name suggests, this technique is used to thin the image to a 1-pixel wide
skeleton by applying thinning successively.
Convex hull
===========
The ``convex_hull_image`` is the *set of pixels included in the smallest
convex polygon that surround all white pixels in the input image*. Again note
that this is also performed on binary images.
"""
hull1 = convex_hull_image(horse)
plot_comparison(horse, hull1, 'convex hull')
"""
.. image:: PLOT2RST.current_figure
As the figure illustrates, ``convex_hull_image`` gives the smallest polygon
which completely covers the white (``True``) pixels in the image.
If we add a small grain to the image, we can see how the convex hull adapts to
enclose that grain:
"""
import numpy as np
horse2 = np.copy(horse)
horse2[45:50, 75:80] = 1
hull2 = convex_hull_image(horse2)
plot_comparison(horse2, hull2, 'convex hull')
"""
.. image:: PLOT2RST.current_figure
Additional Resources
====================
1. `MathWorks tutorial on morphological processing
<http://www.mathworks.com/help/images/morphology-fundamentals-dilation-and-erosion.html>`_
2. `Auckland university's tutorial on Morphological Image Processing
<http://www.cs.auckland.ac.nz/courses/compsci773s1c/lectures/ImageProcessing-html/topic4.htm>`_
3. http://en.wikipedia.org/wiki/Mathematical_morphology
"""
plt.show()
| bsd-3-clause |
codenote/chromium-test | ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 6 | 8213 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
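# For the example .txt contents shown above, ReadDumpTxtFile would return
# {'plat': 'Win32', 'prod': 'Chromium', 'ptype': 'nacl-loader', 'rept': 'crash svc'}.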
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
proc = subprocess.Popen([os.path.join(browser_dir, crash_service_exe),
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe')
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
| bsd-3-clause |
hainm/dask | dask/dataframe/shuffle.py | 4 | 2967 | from itertools import count
from collections import Iterator
from math import ceil
from toolz import merge, accumulate, merge_sorted
import toolz
from operator import getitem, setitem
import pandas as pd
import numpy as np
from pframe import pframe
from .. import threaded
from .core import DataFrame, Series, get, names
from ..compatibility import unicode
from ..utils import ignoring
tokens = ('-%d' % i for i in count(1))
def set_index(f, index, npartitions=None, **kwargs):
""" Set DataFrame index to new column
    Sorts the index and realigns the DataFrame to the new sorted order. This shuffles and
repartitions your data.
"""
npartitions = npartitions or f.npartitions
if not isinstance(index, Series):
index2 = f[index]
else:
index2 = index
divisions = (index2
.quantiles(np.linspace(0, 100, npartitions+1)[1:-1])
.compute())
return f.set_partition(index, divisions, **kwargs)
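# Minimal usage sketch (hypothetical column name; ``df`` is a dask DataFrame):
#     df2 = set_index(df, 'id', npartitions=4)
# This computes approximate quantiles of df['id'] to choose the divisions and
# then hands off to set_partition to shuffle rows into those partitions.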
partition_names = ('set_partition-%d' % i for i in count(1))
def set_partition(f, index, divisions, get=threaded.get, **kwargs):
""" Set new partitioning along index given divisions """
divisions = unique(divisions)
name = next(names)
if isinstance(index, Series):
assert index.divisions == f.divisions
dsk = dict(((name, i), (f._partition_type.set_index, block, ind))
for i, (block, ind) in enumerate(zip(f._keys(), index._keys())))
f2 = type(f)(merge(f.dask, index.dask, dsk), name,
f.column_info, f.divisions)
else:
dsk = dict(((name, i), (f._partition_type.set_index, block, index))
for i, block in enumerate(f._keys()))
f2 = type(f)(merge(f.dask, dsk), name, f.column_info, f.divisions)
head = f2.head()
pf = pframe(like=head, divisions=divisions, **kwargs)
def append(block):
pf.append(block)
return 0
f2.map_blocks(append).compute(get=get)
pf.flush()
return from_pframe(pf)
def from_pframe(pf):
""" Load dask.array from pframe """
name = next(names)
dsk = dict(((name, i), (pframe.get_partition, pf, i))
for i in range(pf.npartitions))
return DataFrame(dsk, name, pf.columns, pf.divisions)
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories, divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
| bsd-3-clause |
allanino/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_tkagg.py | 69 | 24593 | # Todd Miller jmiller@stsci.edu
from __future__ import division
import os, sys, math
import Tkinter as Tk, FileDialog
import tkagg # Paint image to Tk photo blitter extension
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
def round(x):
return int(math.floor(x+0.5))
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
import tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
def show():
"""
Show all the figures and enter the gtk mainloop
This should be the last line of your script. This function sets
interactive mode to True, as detailed on
http://matplotlib.sf.net/interactive.html
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
import matplotlib
matplotlib.interactive(True)
if rcParams['tk.pythoninspect']:
os.environ['PYTHONINSPECT'] = '1'
if show._needmain:
Tk.mainloop()
show._needmain = False
show._needmain = True
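# ``_needmain`` is a function attribute used as a module-level flag, so that
# Tk.mainloop() is started at most once across repeated calls to show().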
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
_focus = windowing.FocusManager()
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Tk.Tk()
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=4)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w/2, h/2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows)
self._master = master
self._tkcanvas.focus_set()
# a dict from func-> cbook.Scheduler threads
self.sourced = dict()
# call the idle handler
def on_idle(*ignore):
self.idle_event()
return True
# disable until you figure out how to handle threads and interrupts
#t = cbook.Idle(on_idle)
#self._tkcanvas.after_idle(lambda *ignore: t.start())
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=width, height=height)
self._tkcanvas.create_image(width/2,height/2,image=self._tkphoto)
self.resize_event()
self.show()
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
d = self._idle
self._idle = False
def idle_draw(*args):
self.draw()
self._idle = True
if d: self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, guiEvent=event)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = -1
elif num==5: step = +1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val<256:
key = chr(val)
else:
key = None
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def flush_events(self):
self._master.update()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerTkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The tk.Toolbar
window : The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.window.wm_title("Figure %d" % num)
self.canvas = canvas
self._num = num
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window.minsize(int(w*3/4),int(h*3/4))
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self.window )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2TkAgg( canvas, self.window )
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.show()
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
if not self._shown: self.canvas._tkcanvas.bind("<Destroy>", destroy)
_focus = windowing.FocusManager()
if not self._shown:
self.window.deiconify()
# anim.py requires this
if sys.platform=='win32' : self.window.update()
else:
self.canvas.draw()
self._shown = True
def destroy(self, *args):
if Gcf.get_num_fig_managers()==0 and not matplotlib.is_interactive():
if self.window is not None:
self.window.quit()
if self.window is not None:
#self.toolbar.destroy()
self.window.destroy()
pass
self.window = None
def set_window_title(self, title):
self.window.wm_title(title)
class AxisMenu:
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar(Tk.Frame):
"""
    Public attributes
      canvas - the FigureCanvasTkAgg instance
      window - the Tk window
"""
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
xmin, xmax = canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bLeft = self._Button(
text="Left", file="stock_left.ppm",
command=lambda x=-1: self.panx(x))
self.bRight = self._Button(
text="Right", file="stock_right.ppm",
command=lambda x=1: self.panx(x))
self.bZoomInX = self._Button(
text="ZoomInX",file="stock_zoom-in.ppm",
command=lambda x=1: self.zoomx(x))
self.bZoomOutX = self._Button(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=lambda x=-1: self.zoomx(x))
self.bUp = self._Button(
text="Up", file="stock_up.ppm",
command=lambda y=1: self.pany(y))
self.bDown = self._Button(
text="Down", file="stock_down.ppm",
command=lambda y=-1: self.pany(y))
self.bZoomInY = self._Button(
text="ZoomInY", file="stock_zoom-in.ppm",
command=lambda y=1: self.zoomy(y))
self.bZoomOutY = self._Button(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=lambda y=-1: self.zoomy(y))
self.bSave = self._Button(
text="Save", file="stock_save_as.ppm",
command=self.save_figure)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, direction):
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
def pany(self, direction):
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
def zoomx(self, direction):
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
def zoomy(self, direction):
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
def save_figure(self):
fs = FileDialog.SaveFileDialog(master=self.window,
title='Save the figure')
try:
self.lastDir
except AttributeError:
self.lastDir = os.curdir
fname = fs.go(dir_or_file=self.lastDir) # , pattern="*.png")
if fname is None: # Cancel
return
self.lastDir = os.path.dirname(fname)
try:
self.canvas.print_figure(fname)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_tkpaint(msg)
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
if not hasattr(self, "omenu"):
self.set_active(range(naxes))
self.omenu = AxisMenu(master=self, naxes=naxes)
else:
self.omenu.adjust(naxes)
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
    Public attributes
      canvas - the FigureCanvasTkAgg instance
      window - the Tk window
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
#Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height-y0
y1 = height-y1
try: self.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bHome = self._Button( text="Home", file="home.ppm",
command=self.home)
self.bBack = self._Button( text="Back", file="back.ppm",
command = self.back)
self.bForward = self._Button(text="Forward", file="forward.ppm",
command = self.forward)
self.bPan = self._Button( text="Pan", file="move.ppm",
command = self.pan)
self.bZoom = self._Button( text="Zoom",
file="zoom_to_rect.ppm",
command = self.zoom)
self.bsubplot = self._Button( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots)
self.bsave = self._Button( text="Save", file="filesave.ppm",
command = self.save_figure)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self):
from tkFileDialog import asksaveasfilename
from tkMessageBox import showerror
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
fname = asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes = tk_filetypes,
defaultextension = self.canvas.get_default_filetype()
)
if fname == "" or fname == ():
return
else:
try:
# This method will handle the delegation to the correct type
self.canvas.print_figure(fname)
except Exception, e:
showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
#if not hasattr(self, "omenu"):
# self.set_active(range(naxes))
# self.omenu = AxisMenu(master=self, naxes=naxes)
#else:
# self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def dynamic_update(self):
'update drawing area only if idle'
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
FigureManager = FigureManagerTkAgg
| agpl-3.0 |
mhoffman/kmos | kmos/cli.py | 1 | 16514 | #!/usr/bin/env python
"""Entry point module for the command-line
interface. The kmos executable should be
on the program path, import this modules
main function and run it.
To call kmos command as you would from the shell,
use ::
kmos.cli.main('...')
Every command can be shortened as long as it is unambiguous, e.g. ::
kmos ex <xml-file>
instead of ::
kmos export <xml-file>
etc.
"""
# Copyright 2009-2013 Max J. Hoffmann (mjhoffmann@gmail.com)
# This file is part of kmos.
#
# kmos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kmos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kmos. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import shutil
usage = {}
usage['all'] = """kmos help all
Display documentation for all commands.
"""
usage['benchmark'] = """kmos benchmark
Run 1 mio. kMC steps on model in current directory
and report runtime.
"""
usage['build'] = """kmos build
Build kmc_model.%s from *f90 files in the
current directory.
Additional Parameters ::
-d/--debug
Turn on assertion statements in F90 code
-n/--no-compiler-optimization
Do not send optimizing flags to compiler.
""" % ('pyd' if os.name == 'nt' else 'so')
usage['help'] = """kmos help <command>
Print usage information for the given command.
"""
usage['export'] = """kmos export <xml-file> [<export-path>]
Take a kmos xml-file and export all generated
    source code to the export-path and try to
build the kmc_model.%s.
Additional Parameters ::
-s/--source-only
Export source only and don't build binary
-b/--backend (local_smart|lat_int)
Choose backend. Default is "local_smart".
lat_int is EXPERIMENTAL and not made
for production, yet.
-d/--debug
Turn on assertion statements in F90 code.
(Only active in compile step)
--acf
Build the modules base_acf.f90 and proclist_acf.f90. Default is false.
            Both modules contain functions to calculate the ACF (autocorrelation function) and the MSD (mean squared displacement).
-n/--no-compiler-optimization
Do not send optimizing flags to compiler.
""" % ('pyd' if os.name == 'nt' else 'so')
usage['settings-export'] = """kmos settings-export <xml-file> [<export-path>]
Take a kmos xml-file and export kmc_settings.py
to the export-path.
"""
usage['edit'] = """kmos edit <xml-file>
Open the kmos xml-file in a GUI to edit
the model.
"""
usage['import'] = """kmos import <xml-file>
Take a kmos xml-file and open an ipython shell
with the project_tree imported as pt.
"""
usage['rebuild'] = """kmos rebuild
Export code and rebuild binary module from XML
information included in kmc_settings.py in
current directory.
Additional Parameters ::
-d/--debug
Turn on assertion statements in F90 code
"""
usage['shell'] = """kmos shell
Open an interactive shell and create a KMC_Model in it
run == shell
"""
usage['run'] = """kmos run
Open an interactive shell and create a KMC_Model in it
run == shell
"""
usage['version'] = """kmos version
Print version number and exit.
"""
usage['view'] = """kmos view
Take a kmc_model.%s and kmc_settings.py in the
same directory and start to simulate the
model visually.
Additional Parameters ::
-v/--steps-per-frame <number>
Number of steps per frame
""" % ('pyd' if os.name == 'nt' else 'so')
usage['xml'] = """kmos xml
Print xml representation of model to stdout
"""
def get_options(args=None, get_parser=False):
import optparse
import os
from glob import glob
import kmos
parser = optparse.OptionParser(
'Usage: %prog [help] ('
+ '|'.join(sorted(usage.keys()))
+ ') [options]',
version=kmos.__version__)
parser.add_option('-s', '--source-only',
dest='source_only',
action='store_true',
default=False)
parser.add_option('-p', '--path-to-f2py',
dest='path_to_f2py',
default='f2py')
parser.add_option('-b', '--backend',
dest='backend',
default='local_smart')
parser.add_option('-a', '--avoid-default-state',
dest='avoid_default_state',
action='store_true',
default=False,
)
parser.add_option('-v', '--steps-per-frame',
dest='steps_per_frame',
type='int',
default='50000')
parser.add_option('-d', '--debug',
default=False,
dest='debug',
action='store_true')
parser.add_option('-n', '--no-compiler-optimization',
default=False,
dest='no_optimize',
action='store_true')
parser.add_option('-o', '--overwrite',
default=False,
action='store_true')
parser.add_option('-l', '--variable-length',
dest='variable_length',
default=95,
type='int')
parser.add_option('-c', '--catmap',
default=False,
action='store_true')
parser.add_option('--acf',
dest='acf',
action='store_true',
default=False,
)
try:
from numpy.distutils.fcompiler import get_default_fcompiler
from numpy.distutils import log
log.set_verbosity(-1, True)
fcompiler = get_default_fcompiler()
except:
fcompiler = 'gfortran'
parser.add_option('-f', '--fcompiler',
dest='fcompiler',
default=os.environ.get('F2PY_FCOMPILER', fcompiler))
if args is not None:
options, args = parser.parse_args(args.split())
else:
options, args = parser.parse_args()
if len(args) < 1:
parser.error('Command expected')
if get_parser:
return options, args, parser
else:
return options, args
def match_keys(arg, usage, parser):
"""Try to match part of a command against
the set of commands from usage. Throws
an error if not successful.
"""
possible_args = [key for key in usage if key.startswith(arg)]
if len(possible_args) == 0:
parser.error('Command "%s" not understood.' % arg)
elif len(possible_args) > 1:
parser.error(('Command "%s" ambiguous.\n'
'Could be one of %s\n\n') % (arg, possible_args))
else:
return possible_args[0]
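# Minimal sketch (not part of the original module; helper name is made up) of
# how the prefix matching above resolves abbreviated commands. The
# OptionParser here is only a dummy needed for the error callback; an
# ambiguous prefix such as 'v' (version/view) would call parser.error(),
# which exits.
def _example_match_keys():
    import optparse
    dummy_parser = optparse.OptionParser()
    assert match_keys('ex', usage, dummy_parser) == 'export'
    assert match_keys('im', usage, dummy_parser) == 'import'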
def main(args=None):
"""The CLI main entry point function.
    The optional argument args can be used to
    directly supply command line arguments like
$ kmos <args>
otherwise args will be taken from STDIN.
"""
from glob import glob
options, args, parser = get_options(args, get_parser=True)
global model, pt, np, cm_model
if not args[0] in usage.keys():
args[0] = match_keys(args[0], usage, parser)
if args[0] == 'benchmark':
from sys import path
path.append(os.path.abspath(os.curdir))
nsteps = 1000000
from time import time
from kmos.run import KMC_Model
model = KMC_Model(print_rates=False, banner=False)
time0 = time()
try:
model.proclist.do_kmc_steps(nsteps)
except: # kmos < 0.3 had no model.proclist.do_kmc_steps
model.do_steps(nsteps)
needed_time = time() - time0
print('Using the [%s] backend.' % model.get_backend())
print('%s steps took %.2f seconds' % (nsteps, needed_time))
print('Or %.2e steps/s' % (1e6 / needed_time))
model.deallocate()
elif args[0] == 'build':
from kmos.utils import build
build(options)
elif args[0] == 'edit':
from kmos import gui
gui.main()
elif args[0] == 'settings-export':
import kmos.types
import kmos.io
from kmos.io import ProcListWriter
if len(args) < 2:
parser.error('XML file and export path expected.')
if len(args) < 3:
out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)
print('No export path provided. Exporting to %s' % out_dir)
args.append(out_dir)
xml_file = args[1]
export_dir = args[2]
project = kmos.types.Project()
project.import_file(xml_file)
writer = ProcListWriter(project, export_dir)
writer.write_settings()
elif args[0] == 'export':
import kmos.types
import kmos.io
from kmos.utils import build
if len(args) < 2:
parser.error('XML file and export path expected.')
if len(args) < 3:
out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)
print('No export path provided. Exporting to %s' % out_dir)
args.append(out_dir)
xml_file = args[1]
export_dir = os.path.join(args[2], 'src')
project = kmos.types.Project()
project.import_file(xml_file)
project.shorten_names(max_length=options.variable_length)
kmos.io.export_source(project,
export_dir,
options=options)
if ((os.name == 'posix'
and os.uname()[0] in ['Linux', 'Darwin'])
or os.name == 'nt') \
and not options.source_only:
os.chdir(export_dir)
build(options)
for out in glob('kmc_*'):
if os.path.exists('../%s' % out) :
if options.overwrite :
overwrite = 'y'
else:
overwrite = raw_input(('Should I overwrite existing %s ?'
'[y/N] ') % out).lower()
if overwrite.startswith('y') :
print('Overwriting {out}'.format(**locals()))
os.remove('../%s' % out)
shutil.move(out, '..')
else :
print('Skipping {out}'.format(**locals()))
else:
shutil.move(out, '..')
elif args[0] == 'settings-export':
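        # NOTE: this branch appears unreachable -- 'settings-export' is already
        # handled by the identical elif branch further up in this chain.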
import kmos.io
pt = kmos.io.import_file(args[1])
if len(args) < 3:
out_dir = os.path.splitext(args[1])[0]
print('No export path provided. Exporting kmc_settings.py to %s'
% out_dir)
args.append(out_dir)
if not os.path.exists(args[2]):
os.mkdir(args[2])
elif not os.path.isdir(args[2]):
raise UserWarning("Cannot overwrite %s; Exiting;" % args[2])
writer = kmos.io.ProcListWriter(pt, args[2])
writer.write_settings()
elif args[0] == 'help':
if len(args) < 2:
parser.error('Which help do you want?')
if args[1] == 'all':
for command in sorted(usage):
print(usage[command])
elif args[1] in usage:
print('Usage: %s\n' % usage[args[1]])
else:
arg = match_keys(args[1], usage, parser)
print('Usage: %s\n' % usage[arg])
elif args[0] == 'import':
import kmos.io
if not len(args) >= 2:
raise UserWarning('XML file name expected.')
pt = kmos.io.import_xml_file(args[1])
if len(args) == 2:
sh(banner='Note: pt = kmos.io.import_xml(\'%s\')' % args[1])
elif len(args) == 3: # if optional 3rd argument is given, store model there and exit
pt.save(args[2])
elif args[0] == 'rebuild':
from time import sleep
print('Will rebuild model from kmc_settings.py in current directory')
        print('Please do not interrupt the build process,'
              ' as you will most likely')
        print('lose the current model files.')
sleep(2.)
from sys import path
path.append(os.path.abspath(os.curdir))
from tempfile import mktemp
if not os.path.exists('kmc_model.so') \
and not os.path.exists('kmc_model.pyd'):
raise Exception('No kmc_model.so found.')
if not os.path.exists('kmc_settings.py'):
raise Exception('No kmc_settings.py found.')
from kmos.run import KMC_Model
model = KMC_Model(print_rates=False, banner=False)
tempfile = mktemp()
f = file(tempfile, 'w')
f.write(model.xml())
f.close()
for kmc_model in glob('kmc_model.*'):
os.remove(kmc_model)
os.remove('kmc_settings.py')
main('export %s -b %s .' % (tempfile, options.backend))
os.remove(tempfile)
model.deallocate()
elif args[0] in ['run', 'shell']:
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos.run import KMC_Model
# useful to have in interactive mode
import numpy as np
try:
from matplotlib import pyplot as plt
except:
plt = None
if options.catmap:
import catmap
import catmap.cli.kmc_runner
seed = catmap.cli.kmc_runner.get_seed_from_path('.')
cm_model = catmap.ReactionModel(setup_file='{seed}.mkm'.format(**locals()))
catmap_message = '\nSide-loaded catmap_model {seed}.mkm into cm_model = ReactionModel(setup_file="{seed}.mkm")'.format(**locals())
else:
catmap_message = ''
try:
model = KMC_Model(print_rates=False)
except:
print("Warning: could not import kmc_model!"
" Please make sure you are in the right directory")
sh(banner='Note: model = KMC_Model(print_rates=False){catmap_message}'.format(**locals()))
try:
model.deallocate()
except:
print("Warning: could not deallocate model. Was is allocated?")
elif args[0] == 'version':
from kmos import VERSION
print(VERSION)
elif args[0] == 'view':
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos import view
view.main(steps_per_frame=options.steps_per_frame)
elif args[0] == 'xml':
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos.run import KMC_Model
model = KMC_Model(banner=False, print_rates=False)
print(model.xml())
else:
parser.error('Command "%s" not understood.' % args[0])
def sh(banner):
"""Wrapper around interactive ipython shell
    that factors out IPython version dependencies.
"""
from distutils.version import LooseVersion
import IPython
if hasattr(IPython, 'release'):
try:
from IPython.terminal.embed import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
try:
from IPython.frontend.terminal.embed \
import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
else:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
| gpl-3.0 |
zorojean/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
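# Minimal sketch (illustration only, not part of the original scikit-learn
# source; the helper name is made up): constant features produce a zero
# scale, which _handle_zeros_in_scale resets to 1.0 so the subsequent
# division becomes a no-op instead of producing NaN/inf.
def _example_handle_zeros_sketch():
    s = np.array([2.0, 0.0, 0.5])
    assert np.allclose(_handle_zeros_in_scale(s), [2.0, 1.0, 0.5])
    assert _handle_zeros_in_scale(0.0) == 1.0  # scalar case (1D fits)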
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
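# Minimal usage sketch (illustration only; helper name is made up): after
# ``scale`` each column has approximately zero mean and unit standard
# deviation.
def _example_scale_sketch():
    X = np.array([[1., 10.], [2., 20.], [3., 30.]])
    Xs = scale(X)
    assert np.allclose(Xs.mean(axis=0), 0.)
    assert np.allclose(Xs.std(axis=0), 1.)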
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
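# Minimal usage sketch (illustration only; helper name is made up) of the
# formula documented above: each column is mapped linearly so that its
# minimum hits feature_range[0] and its maximum hits feature_range[1].
def _example_minmax_scaler_sketch():
    X = np.array([[1., 10.], [2., 20.], [3., 30.]])
    X_scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(X)
    assert np.allclose(X_scaled, [[0., 0.], [0.5, 0.5], [1., 1.]])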
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual feature do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
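# Minimal usage sketch (illustration only; helper name is made up):
# fit/transform standardizes each feature, and inverse_transform recovers
# the original values.
def _example_standard_scaler_sketch():
    X = np.array([[1., 4.], [2., 5.], [3., 7.]])
    scaler = StandardScaler().fit(X)
    Xt = scaler.transform(X)
    assert np.allclose(Xt.mean(axis=0), 0.)
    assert np.allclose(Xt.std(axis=0), 1.)
    assert np.allclose(scaler.inverse_transform(Xt), X)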
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
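# Minimal usage sketch (illustration only; helper name is made up): each
# column is divided by its maximum absolute value, so the data ends up in
# [-1, 1] without being centered.
def _example_maxabs_scaler_sketch():
    X = np.array([[1., -2.], [2., 4.], [-4., 1.]])
    Xt = MaxAbsScaler().fit_transform(X)
    assert np.allclose(Xt, X / 4.)  # both columns happen to have max |x| == 4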
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
                    inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
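# Minimal usage sketch (illustration only; helper name is made up): the
# outlier (100.) would inflate a mean/std badly, but the median/IQR used
# here leave the inliers on a sane scale. The expected values below assume
# numpy's default (linear) percentile interpolation.
def _example_robust_scaler_sketch():
    X = np.array([[1.], [2.], [3.], [4.], [100.]])
    Xt = RobustScaler().fit_transform(X)
    # center_ = median = 3, scale_ = IQR = 4 - 2 = 2  ->  (x - 3) / 2
    assert np.allclose(Xt[:4].ravel(), [-1., -0.5, 0., 0.5])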
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
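# Minimal usage sketch (illustration only; helper name is made up): the same
# row normalized with the three supported norms; e.g. the l2 case rescales
# [3, 4] onto the unit circle.
def _example_normalize_sketch():
    X = np.array([[3., 4.]])
    assert np.allclose(normalize(X, norm='l2'), [[0.6, 0.8]])
    assert np.allclose(normalize(X, norm='l1'), [[3. / 7., 4. / 7.]])
    assert np.allclose(normalize(X, norm='max'), [[0.75, 1.]])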
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
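# Minimal usage sketch (illustration only; helper name is made up): values
# strictly above the threshold become 1, everything else becomes 0.
def _example_binarize_sketch():
    X = np.array([[0.5, -1., 2.]])
    assert np.allclose(binarize(X, threshold=0.0), [[1., 0., 1.]])
    assert np.allclose(binarize(X, threshold=1.0), [[0., 0., 1.]])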
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
    selected : "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
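    Examples
    --------
    An illustrative sketch (this helper is private to the module, so it is
    assumed to be in scope; data values are arbitrary). The transformed
    selected columns are stacked first, followed by the untouched ones.
    >>> import numpy as np
    >>> X = np.array([[0., 10.], [1., 20.]])
    >>> out = _transform_selected(X, lambda Z: Z * 100, selected=[0])
    >>> np.array_equal(out, [[0., 10.], [100., 20.]])
    True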
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
    categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
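    A category not seen during ``fit`` raises an error unless
    ``handle_unknown='ignore'`` is used, in which case it is encoded as all
    zeros (illustrative sketch, arbitrary data):
    >>> import numpy as np
    >>> enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    >>> enc = enc.fit([[0], [1], [2]])
    >>> np.array_equal(enc.transform([[5]]), [[0., 0., 0.]])
    True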
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known from fit,
        # i.e. those less than n_values_, selected via the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
                raise ValueError("handle_unknown should be either 'error' or "
                                 "'ignore', got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
Tong-Chen/scikit-learn | sklearn/cluster/bicluster/tests/test_utils.py | 10 | 1427 | """Tests for bicluster utilities."""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.cluster.bicluster.utils import get_indicators
from sklearn.cluster.bicluster.utils import get_shape
from sklearn.cluster.bicluster.utils import get_submatrix
def test_get_indicators():
rows = [2, 4, 5]
columns = [0, 1, 3]
shape = (6, 4)
row_ind, col_ind = get_indicators(rows, columns, shape)
assert_array_equal(row_ind, [False, False, True, False, True, True])
assert_array_equal(col_ind, [True, True, False, True])
def test_get_shape():
rows = [True, True, False, False]
cols = [True, False, True, True]
assert_equal(get_shape(rows, cols), (2, 3))
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
rows = [True, True, False, False, True]
cols = [False, False, True, True]
for X in (data, csr_matrix(data)):
submatrix = get_submatrix(rows, cols, X)
if issparse(submatrix):
submatrix = submatrix.todense()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.todense()
assert_true(np.all(X != -1))
| bsd-3-clause |
doutib/lobpredict | lobpredictrst/execute_model.py | 1 | 5878 | import sys
import imp
import yaml
import csv
import pandas as pd
import re
from rf import *
from svm import *
modl = imp.load_source('read_model_yaml', 'read_model_yaml.py')
# Parse the YAML file location as the first parameter
inp_yaml = sys.argv[1]
def write_results_txt(filename, result):
"""
    Write results into a text file, one result item per line.
Parameters
----------
filename : string
filename to output the result
    result : list
        result items (parameter descriptions and metrics) to write,
        one per line
"""
with open(filename, "w") as fp:
for item in result:
fp.write("%s\n\n" % item)
def execute_model(inp_yaml):
    """Build, train, and evaluate the model specified in a YAML configuration file.
Parameters
----------
inp_yaml : A yaml file with model specifications
Returns
-------
    None. The model specification and evaluation metrics are written to a
    text file under data/output/, with folder and file name derived from
    the input YAML path.
"""
# Read in and parse all parameters from the YAML file
yaml_params = modl.read_model_yaml(inp_yaml)
# Define output file name based on input
folder_name = re.split("/", inp_yaml)[2]
file_name = re.split("/", inp_yaml)[3][:-5]
output_txt_file = 'data/output/' + folder_name + '/' + file_name + '.txt'
#-------------------------------------------------
# Create Train and Test Datasets
#-------------------------------------------------
data_source_dir = yaml_params["data_source_dir"]
test_type = yaml_params["test_type"]
print('data source dir is: %s' % (data_source_dir))
print('test type is: %s' % (test_type))
if test_type == "test":
train_ds_name = "train.tar.gz"
test_ds_name = "test.tar.gz"
elif test_type == "validation":
train_ds_name = "train_test.tar.gz"
test_ds_name = "validation.tar.gz"
else:
train_ds_name = "train_test_validation.tar.gz"
test_ds_name = "strategy_validation.tar.gz"
train_ds_ref = "data/output/model_clean_data/" + data_source_dir + "/" + train_ds_name
test_ds_ref = "data/output/model_clean_data/" + data_source_dir + "/" + test_ds_name
print('training dataset is: %s' % (train_ds_ref))
print('test dataset is: %s' % (test_ds_ref))
# Open test and train sets
df_train = pd.read_csv(train_ds_ref
, compression='gzip', index_col = None)
df_test = pd.read_csv(test_ds_ref
, compression='gzip', index_col = None)
# Drop the first columns - they are not useful
df_train_clean = df_train.iloc[:,1:]
df_test_clean = df_test.iloc[:,1:]
    # Training data column names - used for variable importance
X_train_cols = list(df_train_clean.drop(['labels', 'index', 'Time'], axis=1).columns.values)
# Define test/training set
X_train = np.array(df_train_clean.drop(['labels', 'index', 'Time'], axis = 1))
Y_train = np.array(df_train_clean[['labels']])[:,0]
X_test = np.array(df_test_clean.drop(['labels', 'index', 'Time'], axis = 1))
Y_test = np.array(df_test_clean[['labels']])[:,0]
#-------------------------------------------------
# Run RF (RANDOM FOREST)
#-------------------------------------------------
if yaml_params["model_type"] == "RF":
# Extract the RF model variables from the YAML file
n_estimators = yaml_params["parameters"]["n_estimators"]
criterion = yaml_params["parameters"]["criterion"]
max_features = yaml_params["parameters"]["max_features"]
max_depth = yaml_params["parameters"]["max_depth"]
n_jobs = yaml_params["parameters"]["n_jobs"]
print('number of trees is: %d' % (n_estimators))
print('max depth is: %d' % (max_depth))
print("running RF WITHOUT simulation...")
# Run simulation
result = rf(X_train_cols = X_train_cols
, X_train = X_train
, Y_train = Y_train
, X_test = X_test
, Y_test = Y_test
, n_estimators = n_estimators
, criterion = criterion
, max_features = max_features
, max_depth = max_depth)
print("finished - rf without simulation")
# Write into text file
write_results_txt(output_txt_file, result)
#-------------------------------------------------
# Run SVM (SUPPORT VECTOR MACHINE)
#-------------------------------------------------
# Extract the SVM model variables from the YAML file
if yaml_params["model_type"] == "SVM":
kernel = yaml_params["parameters"]["kernel"]
degree = yaml_params["parameters"]["degree"]
gamma = yaml_params["parameters"]["gamma"]
tol = yaml_params["parameters"]["tol"]
C = yaml_params["parameters"]["C"]
print('The value of C is: %.2f' % (C))
print("running SVM WITHOUT simulation...")
# Run a single simulation
result = svm(X_train = X_train
, Y_train = Y_train
, X_test = X_test
, Y_test = Y_test
, kernel = kernel
, C = C
, degree = degree
, gamma = gamma
, tol = tol
, decision_function_shape='ovr')
# Write into text file
write_results_txt(output_txt_file, result)
print("finished - SVM without simulation")
# Run the execute model code
execute_model(inp_yaml)
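# Illustrative sketch of the configuration this script expects: the keys below
# are exactly the ones read by execute_model above for the RF branch; the
# values shown are hypothetical and only indicate the shape of the parsed YAML.
_example_yaml_params = {
    "model_type": "RF",                        # or "SVM"
    "data_source_dir": "model_clean_data_v1",  # hypothetical directory name
    "test_type": "test",                       # "test", "validation" or other
    "parameters": {
        "n_estimators": 100,
        "criterion": "gini",
        "max_features": "sqrt",
        "max_depth": 10,
        "n_jobs": 1,
    },
}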
| isc |
hep-gc/panda-autopyfactory | bin/factory.py | 1 | 6335 | #! /usr/bin/env python
#
# Simple(ish) python condor_g factory for panda pilots
#
# $Id$
#
#
# Copyright (C) 2007,2008,2009 Graeme Andrew Stewart
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
import logging
import logging.handlers
import time
import os
import sys
import traceback
# Need to set PANDA_URL_MAP before the Client module is loaded (which happens
# when the Factory module is loaded). Unfortunately this means that logging
# is not yet available.
if not 'APF_NOSQUID' in os.environ:
if not 'PANDA_URL_MAP' in os.environ:
os.environ['PANDA_URL_MAP'] = 'CERN,http://pandaserver.cern.ch:25085/server/panda,https://pandaserver.cern.ch:25443/server/panda'
print >>sys.stderr, 'FACTORY DEBUG: Set PANDA_URL_MAP to %s' % os.environ['PANDA_URL_MAP']
else:
print >>sys.stderr, 'FACTORY DEBUG: Found PANDA_URL_MAP set to %s. Not changed.' % os.environ['PANDA_URL_MAP']
if not 'PANDA_URL' in os.environ:
os.environ['PANDA_URL'] = 'http://pandaserver.cern.ch:25085/server/panda'
print >>sys.stderr, 'FACTORY DEBUG: Set PANDA_URL to %s' % os.environ['PANDA_URL']
else:
print >>sys.stderr, 'FACTORY DEBUG: Found PANDA_URL set to %s. Not changed.' % os.environ['PANDA_URL']
else:
print >>sys.stderr, 'FACTORY DEBUG: Found APF_NOSQUID set. Not changing/setting panda client environment.'
from autopyfactory.Factory import factory
from autopyfactory.Exceptions import FactoryConfigurationFailure
def main():
parser = OptionParser(usage='''%prog [OPTIONS]
autopyfactory is an ATLAS pilot factory.
This program is licensed under the GPL, as set out in the LICENSE file.
Author(s):
Graeme A Stewart <g.stewart@physics.gla.ac.uk>, Peter Love <p.love@lancaster.ac.uk>
''', version="%prog $Id$")
parser.add_option("--verbose", "--debug", dest="logLevel", default=logging.INFO,
action="store_const", const=logging.DEBUG, help="Set logging level to DEBUG [default INFO]")
parser.add_option("--quiet", dest="logLevel",
action="store_const", const=logging.WARNING, help="Set logging level to WARNING [default INFO]")
parser.add_option("--test", "--dry-run", dest="dryRun", default=False,
                      action="store_true", help="Dry run - suppress job submission")
parser.add_option("--oneshot", "--one-shot", dest="cyclesToDo", default=0,
action="store_const", const=1, help="Run one cycle only")
parser.add_option("--cycles", dest="cyclesToDo",
action="store", type="int", metavar="CYCLES", help="Run CYCLES times, then exit [default infinite]")
parser.add_option("--sleep", dest="sleepTime", default=120,
action="store", type="int", metavar="TIME", help="Sleep TIME seconds between cycles [default %default]")
parser.add_option("--conf", dest="confFiles", default="factory.conf",
action="store", metavar="FILE1[,FILE2,FILE3]", help="Load configuration from FILEs (comma separated list)")
parser.add_option("--log", dest="logfile", default="syslog", metavar="LOGFILE", action="store",
help="Send logging output to LOGFILE or SYSLOG or stdout [default <syslog>]")
(options, args) = parser.parse_args()
options.confFiles = options.confFiles.split(',')
# Setup logging
factoryLogger = logging.getLogger('main')
if options.logfile == "stdout":
logStream = logging.StreamHandler()
elif options.logfile == 'syslog':
logStream = logging.handlers.SysLogHandler('/dev/log')
else:
logStream = logging.handlers.RotatingFileHandler(filename=options.logfile, maxBytes=10000000, backupCount=5)
formatter = logging.Formatter('%(asctime)s - %(name)s: %(levelname)s %(message)s')
logStream.setFormatter(formatter)
factoryLogger.addHandler(logStream)
factoryLogger.setLevel(options.logLevel)
factoryLogger.debug('logging initialised')
# Main loop
try:
f = factory(factoryLogger, options.dryRun, options.confFiles)
cyclesDone = 0
while True:
factoryLogger.info('\nStarting factory cycle %d at %s', cyclesDone, time.asctime(time.localtime()))
f.factorySubmitCycle(cyclesDone)
factoryLogger.info('Factory cycle %d done' % cyclesDone)
cyclesDone += 1
if cyclesDone == options.cyclesToDo:
break
factoryLogger.info('Sleeping %ds' % options.sleepTime)
time.sleep(options.sleepTime)
f.updateConfig(cyclesDone)
except KeyboardInterrupt:
factoryLogger.info('Caught keyboard interrupt - exiting')
except FactoryConfigurationFailure, errMsg:
factoryLogger.error('Factory configuration failure: %s', errMsg)
except ImportError, errorMsg:
factoryLogger.error('Failed to import necessary python module: %s' % errorMsg)
except:
# TODO - make this a logger.exception() call
factoryLogger.error('''Unexpected exception! There was an exception
raised which the factory was not expecting and did not know how to
handle. You may have discovered a new bug or an unforeseen error
condition. Please report this exception to Graeme
<g.stewart@physics.gla.ac.uk>. The factory will now re-raise this
exception so that the python stack trace is printed, which will allow
it to be debugged - please send output from this message
onwards. Exploding in 5...4...3...2...1... Have a nice day!''')
# The following line prints the exception to the logging module
factoryLogger.error(traceback.format_exc(None))
raise
if __name__ == "__main__":
main()
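# Illustrative invocation sketch (paths and values are hypothetical; the flags
# correspond to the OptionParser definitions in main() above):
#   python factory.py --conf=/etc/apf/factory.conf --cycles 10 --sleep 300 \
#       --log /var/log/apfactory.log --verbose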
| gpl-3.0 |
gandalfcode/gandalf | tests/paper_tests/binaryorbit.py | 1 | 3711 | #==============================================================================
# binaryorbit.py
# Run the binary orbit test using initial conditions specified in the
# file 'binaryorbit.dat'.
#==============================================================================
from gandalf.analysis.facade import *
from gandalf.analysis.data_fetcher import *
from gandalf.analysis.compute import particle_data
from gandalf.analysis.SimBuffer import SimBuffer, BufferException
import time
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib import rc
from mpl_toolkits.axes_grid1 import AxesGrid
#--------------------------------------------------------------------------------------------------
rc('font', **{'family': 'normal', 'weight' : 'bold', 'size' : 16})
rc('text', usetex=True)
# Binary parameters
m1 = 0.5
m2 = 0.5
abin = 1.0
ebin = 0.5
etot0 = -0.5*m1*m2/abin
period = 2.0*math.pi*math.sqrt(abin*abin*abin/(m1 + m2))
xmin = -0.6
xmax = 2.1
ymin = -0.85
ymax = 0.85
xsize = xmax - xmin
ysize = ymax - ymin
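# Quick sanity check on the derived constants above (illustrative; assumes the
# G = 1 code units that the period formula above implies).
assert abs(period - 2.0*math.pi) < 1e-12   # abin = 1, m1 + m2 = 1  =>  P = 2*pi
assert abs(etot0 + 0.125) < 1e-12          # etot0 = -0.5*m1*m2/abin = -0.125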
CreateTimeData('x',particle_data,quantity='x')
CreateTimeData('y',particle_data,quantity='y')
# Leapfrog KDK
kdksim = newsim('binaryorbit.dat')
kdksim.SetParam('nbody','lfkdk')
setupsim()
run()
x_kdk = get_time_data("t","x")
y_kdk = get_time_data("t","y")
# Leapfrog DKD
dkdsim = newsim('binaryorbit.dat')
dkdsim.SetParam('nbody','lfdkd')
setupsim()
run()
x_dkd = get_time_data("t","x")
y_dkd = get_time_data("t","y")
# 4th-order Hermite
hermite4sim = newsim('binaryorbit.dat')
hermite4sim.SetParam('nbody','hermite4')
setupsim()
run()
x_hermite4 = get_time_data("t","x")
y_hermite4 = get_time_data("t","y")
# 4th-order Hermite TS
hermite4tssim = newsim('binaryorbit.dat')
hermite4tssim.SetParam('nbody','hermite4ts')
hermite4tssim.SetParam('Npec',5)
setupsim()
run()
x_4ts = get_time_data("t","x")
y_4ts = get_time_data("t","y")
# 6th-order Hermite
#hermite6tssim = newsim('binaryorbit.dat')
#hermite6tssim.SetParam('nbody','hermite6ts')
#hermite6tssim.SetParam('Npec',5)
#setupsim()
#run()
#x_6ts = get_time_data("t","x")
#y_6ts = get_time_data("t","y")
# Create matplotlib figure object with shared x-axis
#--------------------------------------------------------------------------------------------------
#fig, axarr = plt.subplots(2, 1, sharex='col', sharey='row', figsize=(10,4))
fig, axarr = plt.subplots(4, 1, figsize=(6,11), sharex='col', sharey='row')
fig.subplots_adjust(hspace=0.001, wspace=0.001)
fig.subplots_adjust(bottom=0.06, top=0.98, left=0.14, right=0.98)
axarr[0].set_ylabel(r"$y$")
axarr[0].set_ylim([ymin, ymax])
axarr[0].set_xlim([xmin, xmax])
axarr[0].plot(x_kdk.y_data, y_kdk.y_data, color="black", linestyle='-', label='Leapfrog KDK', lw=1.0)
axarr[0].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(a) Leapfrog-KDK", fontsize=12)
axarr[1].set_ylabel(r"$y$")
axarr[1].set_ylim([ymin, ymax])
axarr[1].plot(x_dkd.y_data, y_dkd.y_data, color="black", linestyle='-', label='Leapfrog DKD', lw=1.0)
axarr[1].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(b) Leapfrog-DKD", fontsize=12)
axarr[2].set_ylabel(r"$y$")
axarr[2].set_ylim([ymin, ymax])
axarr[2].plot(x_hermite4.y_data, y_hermite4.y_data, color="black", linestyle='-', label='4H', lw=1.0)
axarr[2].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(c) 4th-order Hermite", fontsize=12)
axarr[3].set_xlabel(r"$x$")
axarr[3].set_ylabel(r"$y$")
axarr[3].set_ylim([ymin, ymax])
axarr[3].plot(x_4ts.y_data, y_4ts.y_data, color="black", linestyle='-', label='4TS', lw=1.0)
axarr[3].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(d) 4th-order Hermite TS", fontsize=12)
plt.show()
fig.savefig('binaryorbit.pdf', dpi=50)
# Prevent program from closing before showing plot window
block()
| gpl-2.0 |
IssamLaradji/scikit-learn | sklearn/linear_model/ransac.py | 16 | 13870 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
    """Determine the number of trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
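    Examples
    --------
    An illustrative worked case: with 80 inliers out of 100 samples and
    ``min_samples=2``, a 99% chance of drawing at least one all-inlier pair
    needs only ceil(log(0.01) / log(1 - 0.8**2)) = 5 trials.
    >>> _dynamic_max_trials(80, 100, 2, 0.99)
    5.0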
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
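    Examples
    --------
    A minimal sketch (synthetic data chosen for illustration): fit a line to
    data whose first ten targets are grossly corrupted and count how many
    samples end up flagged as inliers; the default residual threshold (the
    MAD of ``y``) separates the two groups cleanly here.
    >>> import numpy as np
    >>> from sklearn.linear_model import RANSACRegressor
    >>> X = np.arange(100, dtype=np.float64)[:, np.newaxis]
    >>> y = 2 * X.ravel() + 1
    >>> y[:10] += 500                      # gross outliers
    >>> ransac = RANSACRegressor(random_state=0).fit(X, y)
    >>> int(ransac.inlier_mask_.sum())
    90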
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
if y.ndim == 1:
y = y.reshape(-1, 1)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
if y_pred.ndim == 1:
y_pred = y_pred[:, None]
residuals_subset = residual_metric(y_pred - y)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
return self.estimator_.score(X, y)
| bsd-3-clause |
hilaskis/UAV_MissionPlanner | Lib/site-packages/numpy/linalg/linalg.py | 53 | 61098 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import sys
from numpy.core import array, asarray, zeros, empty, transpose, \
intc, single, double, csingle, cdouble, inexact, complexfloating, \
newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \
isfinite, size, finfo, absolute, log, exp
from numpy.lib import triu
from numpy.linalg import lapack_lite
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
"""
pass
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError, '%d-dimensional array given. Array must be \
two-dimensional' % len(a.shape)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError, 'Array must be square'
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError, "Array must not contain infs or NaNs"
def _assertNonEmpty(*arrays):
for a in arrays:
if size(a) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a,wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = range(0, an)
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : array_like, shape (M, M)
Coefficient matrix.
b : array_like, shape (M,) or (M, N)
Ordinate or "dependent variable" values.
Returns
-------
x : ndarray, shape (M,) or (M, N) depending on b
Solution to the system a x = b
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
`solve` is a wrapper for the LAPACK routines `dgesv`_ and
`zgesv`_, the former being used if `a` is real-valued, the latter if
it is complex-valued. The solution to the system of linear equations
is computed using an LU decomposition [1]_ with partial pivoting and
row interchanges.
.. _dgesv: http://www.netlib.org/lapack/double/dgesv.f
.. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> (np.dot(a, x) == b).all()
True
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
one_eq = len(b.shape) == 1
if one_eq:
b = b[:, newaxis]
_assertRank2(a, b)
_assertSquareness(a)
n_eq = a.shape[0]
n_rhs = b.shape[1]
if n_eq != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
# lapack_routine = _findLapackRoutine('gesv', t)
if isComplexType(t):
lapack_routine = lapack_lite.zgesv
else:
lapack_routine = lapack_lite.dgesv
a, b = _fastCopyAndTranspose(t, a, b)
a, b = _to_native_byte_order(a, b)
pivots = zeros(n_eq, fortran_int)
results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise LinAlgError, 'Singular matrix'
if one_eq:
return wrap(b.ravel().astype(result_t))
else:
return wrap(b.transpose().astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[:ind] + a.shape[ind:]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError, "Invalid ind argument."
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : array_like, shape (M, M)
Matrix to be inverted.
Returns
-------
ainv : ndarray or matrix, shape (M, M)
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular or not square.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = LA.inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = LA.inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
"""
a, wrap = _makearray(a)
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : array_like, shape (M, M)
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : ndarray, or matrix object if `a` is, shape (M, M)
Lower-triangular Cholesky factor of a.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
m = a.shape[0]
n = a.shape[1]
if isComplexType(t):
lapack_routine = lapack_lite.zpotrf
else:
lapack_routine = lapack_lite.dpotrf
results = lapack_routine(_L, n, a, m, 0)
if results['info'] > 0:
raise LinAlgError, 'Matrix is not positive definite - \
Cholesky decomposition cannot be computed'
s = triu(a, k=0).transpose()
if (s.dtype != result_t):
s = s.astype(result_t)
return wrap(s)
# QR decomposition
def qr(a, mode='full'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like
Matrix to be factored, of shape (M, N).
mode : {'full', 'r', 'economic'}, optional
Specifies the values to be returned. 'full' is the default.
        Economic mode is slightly faster than 'r' mode if only `r` is needed.
Returns
-------
q : ndarray of float or complex, optional
The orthonormal matrix, of shape (M, K). Only returned if
``mode='full'``.
r : ndarray of float or complex, optional
The upper-triangular matrix, of shape (K, N) with K = min(M, N).
Only returned when ``mode='full'`` or ``mode='r'``.
a2 : ndarray of float or complex, optional
        Array of shape (M, N), only returned when ``mode='economic'``.
The diagonal and the upper triangle of `a2` contains `r`, while
the rest of the matrix is undefined.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
all the return values will be matrices too.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
a, wrap = _makearray(a)
_assertRank2(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# economic mode. Isn't actually economic.
if mode[0] == 'e':
if t != result_t :
a = a.astype(result_t)
return a.T
# generate r
r = _fastCopyAndTranspose(result_t, a[:,:mn])
for i in range(mn):
r[i,:i].fill(0.0)
# 'r'-mode, that is, calculate only r
if mode[0] == 'r':
return r
# from here on: build orthonormal matrix q from a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
q = _fastCopyAndTranspose(result_t, a[:mn,:])
return wrap(q), wrap(r)
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
that sets those routines' flags to return only the eigenvalues of
general real and complex arrays, respectively.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
rwork = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, lwork, 0)
if all(wi == 0.):
w = wr
result_t = _realType(result_t)
else:
w = wr+1j*wi
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd
that sets those routines' flags to return only the eigenvalues of
real symmetric and complex Hermitian arrays, respectively.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : array_like, shape (M, M)
A square array of real or complex elements.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered, nor are they
necessarily real for real arrays (though for real arrays
complex-valued eigenvalues should occur in conjugate pairs).
v : ndarray, shape (M, M)
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
which compute the eigenvalues and eigenvectors of, respectively,
general real- and complex-valued square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[i,:], v[i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
a, t, result_t = _convertarray(a) # convert to double or cdouble type
a = _to_native_byte_order(a)
real_t = _linalgRealType(t)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
# Complex routines take different arguments
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
v = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
rwork = zeros((2*n,), real_t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
vr = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, lwork, 0)
if all(wi == 0.0):
w = wr
v = vr
result_t = _realType(result_t)
else:
w = wr+1j*wi
v = array(vr, w.dtype)
ind = flatnonzero(wi != 0.0) # indices of complex e-vals
for i in range(len(ind)//2):
v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
vt = v.transpose().astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : array_like, shape (M, M)
A complex Hermitian or real symmetric matrix.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered.
v : ndarray, or matrix object if `a` is, shape (M, M)
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd,
which compute the eigenvalues and eigenvectors of real symmetric and
complex Hermitian arrays, respectively.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
at = a.transpose().astype(result_t)
return w.astype(_realType(result_t)), wrap(at)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : ndarray
Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)
depending on value of ``full_matrices``.
s : ndarray
The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is
a 1-d array of length min(`M`, `N`).
v : ndarray
Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
``full_matrices``.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertNonEmpty(a)
m, n = a.shape
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
s = zeros((min(n, m),), real_t)
if compute_uv:
if full_matrices:
nu = m
nvt = n
option = _A
else:
nu = min(n, m)
nvt = min(n, m)
option = _S
u = zeros((nu, m), t)
vt = zeros((n, nvt), t)
else:
option = _N
nu = 1
nvt = 1
u = empty((1, 1), t)
vt = empty((1, 1), t)
iwork = zeros((8*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgesdd
rwork = zeros((5*min(m, n)*min(m, n) + 5*min(m, n),), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgesdd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge'
s = s.astype(_realType(result_t))
if compute_uv:
u = u.transpose().astype(result_t)
vt = vt.transpose().astype(result_t)
return wrap(u), s, wrap(vt)
else:
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : array_like, shape (M, N)
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x,compute_uv=False)
return s[0]/s[-1]
else:
return norm(x,p)*norm(inv(x),p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the
array that are greater than `tol`.
Parameters
----------
M : array_like
array of <=2 dimensions
tol : {None, float}
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * eps``.
Notes
-----
Golub and van Loan [1]_ define "numerical rank deficiency" as using
tol=eps*S[0] (where S[0] is the maximum singular value and thus the
2-norm of the matrix). This is one definition of rank deficiency,
and the one we use here. When floating point roundoff is the main
concern, then "numerical rank deficiency" is a reasonable choice. In
some cases you may prefer other definitions. The most useful measure
of the tolerance depends on the operations you intend to use on your
matrix. For example, if your data come from uncertain measurements
with uncertainties greater than floating point epsilon, choosing a
tolerance near that uncertainty may be preferable. The tolerance
may be absolute if the uncertainties are absolute rather than
relative.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*.
Baltimore: Johns Hopkins University Press, 1996.
Examples
--------
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * finfo(S.dtype).eps
return sum(S > tol)
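# Editorial sketch (illustrative only, not part of the original module): the
# Notes above recommend an absolute tolerance when the data carry a known
# measurement uncertainty larger than floating-point epsilon. The noise level
# and threshold below are made-up example values.
def _matrix_rank_tolerance_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    M = np.outer(np.arange(1., 5.), np.arange(1., 4.))  # exact rank 1
    noisy = M + 1e-3 * rng.randn(*M.shape)              # numerically rank 3
    rank_default = matrix_rank(noisy)                   # eps-based tolerance
    rank_absolute = matrix_rank(noisy, tol=1e-2)        # absolute tolerance
    return rank_default, rank_absolute                  # typically (3, 1)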
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : ndarray, shape (N, M)
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
    of A's so-called singular values (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNonEmpty(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
            s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u)))
return wrap(res)
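# Editorial sketch (illustrative only, not part of the original module): the
# Notes in pinv describe the pseudo-inverse as V Sigma^+ U^H built from the
# SVD. The helper below rebuilds it that way, using the svd function defined
# in this module, and should agree with pinv up to round-off.
def _pinv_from_svd_sketch(a, rcond=1e-15):
    import numpy as np
    a = np.asarray(a)
    u, s, vt = svd(a, 0)            # reduced SVD: a = u * diag(s) * vt
    cutoff = rcond * s.max()
    s_inv = np.zeros_like(s)
    large = s > cutoff
    s_inv[large] = 1. / s[large]    # reciprocals of the "large" singular values
    # V * Sigma^+ * U^H, with V = vt.conj().T and U^H = u.conj().T
    return np.dot(vt.conj().T, s_inv[:, np.newaxis] * u.conj().T)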
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
    If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
sign : float or complex
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : float
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to `sign * np.exp(logdet)`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 2.0.0.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
See Also
--------
det
"""
a = asarray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
if isComplexType(t):
lapack_routine = lapack_lite.zgetrf
else:
lapack_routine = lapack_lite.dgetrf
pivots = zeros((n,), fortran_int)
results = lapack_routine(n, n, a, n, pivots, 0)
info = results['info']
if (info < 0):
raise TypeError, "Illegal input to Fortran routine"
elif (info > 0):
return (t(0.0), _realType(t)(-Inf))
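    # Sign of the permutation: getrf's pivot vector records, for each row,
    # the (1-based) row it was swapped with; every entry differing from its
    # own index is one interchange, and an odd number of interchanges flips
    # the determinant's sign -- hence the parity test below. The diagonal of
    # U contributes the remaining sign via d / |d|, and the log-magnitude is
    # accumulated as sum(log|d|) to avoid overflow/underflow.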
sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)
d = diagonal(a)
absd = absolute(d)
sign *= multiply.reduce(d / absd)
log(absd, absd)
logdet = add.reduce(absd, axis=-1)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
det : ndarray
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
"""
sign, logdet = slogdet(a)
return sign * exp(logdet)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : array_like, shape (M, N)
"Coefficient" matrix.
b : array_like, shape (M,) or (M, K)
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : ndarray, shape (N,) or (N, K)
Least-squares solution. The shape of `x` depends on the shape of
`b`.
residues : ndarray, shape (), (1,), or (K,)
Sums of residues; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or > M, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : ndarray, shape (min(M,N),)
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0],:n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
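        # The dgelsd call on same-shaped real dummy arrays below is only a
        # workspace query (lwork=-1): its rwork[0] output is used to size the
        # real workspace (lrwork) for the complex zgelsd call that follows;
        # no actual solve is performed here.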
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge in Linear Least Squares'
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def norm(x, ord=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like, shape (M,) or (M, N)
Input array.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
Returns
-------
n : float
Norm of the matrix or vector.
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
"""
x = asarray(x)
if ord is None: # check the default case first and handle it immediately
return sqrt(add.reduce((x.conj() * x).ravel().real))
nd = x.ndim
if nd == 1:
if ord == Inf:
return abs(x).max()
elif ord == -Inf:
return abs(x).min()
elif ord == 0:
return (x != 0).sum() # Zero norm
elif ord == 1:
return abs(x).sum() # special case for speedup
elif ord == 2:
return sqrt(((x.conj()*x).real).sum()) # special case for speedup
else:
try:
ord + 1
except TypeError:
raise ValueError, "Invalid norm order for vectors."
return ((abs(x)**ord).sum())**(1.0/ord)
elif nd == 2:
if ord == 2:
return svd(x, compute_uv=0).max()
elif ord == -2:
return svd(x, compute_uv=0).min()
elif ord == 1:
return abs(x).sum(axis=0).max()
elif ord == Inf:
return abs(x).sum(axis=1).max()
elif ord == -1:
return abs(x).sum(axis=0).min()
elif ord == -Inf:
return abs(x).sum(axis=1).min()
elif ord in ['fro','f']:
return sqrt(add.reduce((x.conj() * x).real.ravel()))
else:
raise ValueError, "Invalid norm order for matrices."
else:
raise ValueError, "Improper number of dimensions to norm."
| gpl-2.0 |
ishanic/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
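# Editorial sketch (illustrative only, not part of the original module): a
# minimal use of scale() showing the per-column zero-mean / unit-variance
# result; the data are made up and all names come from this module or numpy.
def _scale_usage_sketch():
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    X_scaled = scale(X)
    # each column of X_scaled now has (approximately) zero mean, unit variance
    return X_scaled.mean(axis=0), X_scaled.std(axis=0)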
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
        Set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
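# Editorial sketch (illustrative only, not part of the original module):
# fitting MinMaxScaler on a training set and reusing the learned offset and
# scale on new data; the numbers are made up.
def _minmax_scaler_usage_sketch():
    X_train = np.array([[1., 10.], [2., 20.], [3., 40.]])
    scaler = MinMaxScaler(feature_range=(0, 1)).fit(X_train)
    X_train_scaled = scaler.transform(X_train)      # each column spans [0, 1]
    X_new_scaled = scaler.transform([[2.5, 25.]])   # same affine mapping
    return X_train_scaled, X_new_scaled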
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
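# Editorial sketch (illustrative only, not part of the original module): the
# usual fit-on-train / transform-both pattern for StandardScaler; the data
# are synthetic.
def _standard_scaler_usage_sketch():
    rng = np.random.RandomState(0)
    X_train = rng.normal(loc=5., scale=2., size=(100, 3))
    X_test = rng.normal(loc=5., scale=2., size=(10, 3))
    scaler = StandardScaler().fit(X_train)     # stores mean_ and std_
    X_train_std = scaler.transform(X_train)    # ~zero mean, unit variance
    X_test_std = scaler.transform(X_test)      # shifted/scaled like the train set
    return X_train_std.mean(axis=0), X_test_std.mean(axis=0)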
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
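# Editorial sketch (illustrative only, not part of the original module):
# MaxAbsScaler on a CSR matrix -- each column is divided by its maximum
# absolute value and the sparsity pattern is preserved.
def _maxabs_scaler_sparse_sketch():
    X = sparse.csr_matrix([[1., -2., 0.],
                           [0., 4., 0.],
                           [-3., 0., 5.]])
    scaler = MaxAbsScaler().fit(X)        # scale_ == [3., 4., 5.]
    X_scaled = scaler.transform(X)        # still sparse, entries in [-1, 1]
    return scaler.scale_, X_scaled.toarray()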
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
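# Editorial sketch (illustrative only, not part of the original module):
# RobustScaler versus a single extreme value -- the median/IQR statistics it
# uses barely move when an outlier is added, which is the point of this
# scaler. The numbers are made up.
def _robust_scaler_outlier_sketch():
    X = np.array([[1.], [2.], [3.], [4.], [5.]])
    X_outlier = np.vstack([X, [[1000.]]])
    center_clean = RobustScaler().fit(X).center_          # median 3.0
    center_dirty = RobustScaler().fit(X_outlier).center_  # median 3.5
    return center_clean, center_dirty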
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
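# Illustrative usage sketch (editorial addition, not part of the library source):
# row-wise l2 normalization of a small dense matrix; the values are made up.
#
#   import numpy as np
#   X_demo = np.array([[3.0, 4.0], [1.0, 0.0]])
#   normalize(X_demo, norm='l2')  # -> [[0.6, 0.8], [1.0, 0.0]]; each row has unit length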
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
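# Illustrative pipeline sketch (editorial addition, not part of the library source):
# because Normalizer is stateless, it is typically chained in front of an estimator.
# The choice of LinearSVC and the X_train/y_train names are assumptions for
# demonstration only.
#
#   from sklearn.pipeline import make_pipeline
#   from sklearn.svm import LinearSVC
#   clf = make_pipeline(Normalizer(norm='l2'), LinearSVC())
#   clf.fit(X_train, y_train)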
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
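# Illustrative usage sketch (editorial addition, not part of the library source):
# thresholding a small dense matrix at 0.0; the values are made up.
#
#   import numpy as np
#   X_demo = np.array([[0.5, -1.2], [0.0, 3.0]])
#   binarize(X_demo, threshold=0.0)  # -> [[1., 0.], [0., 1.]] (values <= 0 map to 0)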
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
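# Illustrative usage sketch (editorial addition, not part of the library source):
# centering a precomputed linear kernel. X_train is a placeholder and the use of
# sklearn.metrics.pairwise.linear_kernel is an assumption for demonstration.
#
#   from sklearn.metrics.pairwise import linear_kernel
#   K = linear_kernel(X_train)              # K[i, j] = <x_i, x_j>
#   centerer = KernelCenterer().fit(K)
#   K_centered = centerer.transform(K)      # same as centering phi(x) in feature space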
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known from fit,
        # i.e. those whose values are less than n_values_, using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
mueller-lab/PyFRAP | pyfrp/modules/pyfrp_optimization_module.py | 2 | 6867 | #=====================================================================================================================================
#Copyright
#=====================================================================================================================================
#Copyright (C) 2014 Alexander Blaessle, Patrick Mueller and the Friedrich Miescher Laboratory of the Max Planck Society
#This software is distributed under the terms of the GNU General Public License.
#This file is part of PyFRAP.
#PyFRAP is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#===========================================================================================================================================================================
#Module Description
#===========================================================================================================================================================================
"""Optimization module for PyFRAP toolbox.
Currently contains all functions necessary to transform a constrained FRAP optimization problem into
an unconstrained one, making it suitable for the Nelder-Mead optimization algorithm.
"""
#===========================================================================================================================================================================
#Importing necessary modules
#===========================================================================================================================================================================
#Numpy/Scipy
import numpy as np
#PyFRAP
import pyfrp_fit_module
from pyfrp_term_module import *
#===========================================================================================================================================================================
#Module Functions
#===========================================================================================================================================================================
def constrObjFunc(x,fit,debug,ax,returnFit):
"""Objective function when using Constrained Nelder-Mead.
Calls :py:func:`pyfrp.modules.pyfrp_optimization_module.xTransform` to transform x into
constrained version, then uses :py:func:`pyfrp.modules.pyfrp_fit_module.FRAPObjFunc` to
find SSD.
Args:
	x (list): Input vector, consisting of [D,(prod),(degr)].
fit (pyfrp.subclasses.pyfrp_fit): Fit object.
debug (bool): Display debugging output and plots.
ax (matplotlib.axes): Axes to display plots in.
returnFit (bool): Return fit instead of SSD.
Returns:
	float: SSD of fit. If ``returnFit==True``, the fit object itself is returned instead.
"""
LBs, UBs = buildBoundLists(fit)
x=xTransform(x,LBs,UBs)
ssd=pyfrp_fit_module.FRAPObjFunc(x,fit,debug,ax,returnFit)
return ssd
def xTransform(x,LB,UB):
"""Transforms ``x`` into constrained form, obeying upper
bounds ``UB`` and lower bounds ``LB``.
.. note:: Will add tiny offset to LB(D), to avoid singularities.
Idea taken from http://www.mathworks.com/matlabcentral/fileexchange/8277-fminsearchbnd--fminsearchcon
Args:
	x (list): Input vector, consisting of [D,(prod),(degr)].
LB (list): List of lower bounds for ``D,prod,degr``.
UB (list): List of upper bounds for ``D,prod,degr``.
Returns:
list: Transformed x-values.
"""
#Make sure everything is float
x=np.asarray(x,dtype=np.float64)
LB=np.asarray(LB,dtype=np.float64)
UB=np.asarray(UB,dtype=np.float64)
#Check if LB_D==0, then add a little noise to it so we do not end up with xtrans[D]==0 and later have singularities when scaling tvec
if LB[0]==0:
LB[0]=1E-10
#Determine number of parameters to be fitted
nparams=len(x)
#Make empty vector
xtrans = np.zeros(np.shape(x))
# k allows some variables to be fixed, thus dropped from the
# optimization.
k=0
for i in range(nparams):
#Upper bound only
if UB[i]!=None and LB[i]==None:
xtrans[i]=UB[i]-x[k]**2
k=k+1
#Lower bound only
elif UB[i]==None and LB[i]!=None:
xtrans[i]=LB[i]+x[k]**2
k=k+1
#Both bounds
elif UB[i]!=None and LB[i]!=None:
xtrans[i] = (np.sin(x[k])+1.)/2.*(UB[i] - LB[i]) + LB[i]
xtrans[i] = max([LB[i],min([UB[i],xtrans[i]])])
k=k+1
#No bounds
elif UB[i]==None and LB[i]==None:
xtrans[i] = x[k]
k=k+1
	#Note: The original file has another case here for fixed variables, but since that decision is made earlier, when frap_fitting is called, we don't need it here.
return xtrans
def transformX0(x0,LB,UB):
"""Transforms ``x0`` into constrained form, obeying upper
bounds ``UB`` and lower bounds ``LB``.
Idea taken from http://www.mathworks.com/matlabcentral/fileexchange/8277-fminsearchbnd--fminsearchcon
Args:
	x0 (list): Input initial vector, consisting of [D,(prod),(degr)].
LB (list): List of lower bounds for ``D,prod,degr``.
UB (list): List of upper bounds for ``D,prod,degr``.
Returns:
list: Transformed x-values.
"""
x0u = list(x0)
nparams=len(x0)
k=0
for i in range(nparams):
#Upper bound only
if UB[i]!=None and LB[i]==None:
if UB[i]<=x0[i]:
x0u[k]=0
else:
				x0u[k]=np.sqrt(UB[i]-x0[i])
k=k+1
#Lower bound only
elif UB[i]==None and LB[i]!=None:
if LB[i]>=x0[i]:
x0u[k]=0
else:
x0u[k]=np.sqrt(x0[i]-LB[i])
k=k+1
#Both bounds
elif UB[i]!=None and LB[i]!=None:
if UB[i]<=x0[i]:
x0u[k]=np.pi/2
elif LB[i]>=x0[i]:
x0u[k]=-np.pi/2
else:
x0u[k] = 2*(x0[i] - LB[i])/(UB[i]-LB[i]) - 1;
				#shift by 2*pi to avoid problems at zero in fminsearch; otherwise, the initial simplex is vanishingly small
x0u[k] = 2*np.pi+np.arcsin(max([-1,min(1,x0u[k])]));
k=k+1
#No bounds
elif UB[i]==None and LB[i]==None:
			x0u[k] = x0[i]
k=k+1
return x0u
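#Illustrative sketch (editorial addition, not part of the original module): how
#transformX0 and xTransform are meant to pair up around an unconstrained optimizer.
#The bound and guess values below are made up for demonstration.
#
#	LB = [0., None, 0.]             # lower bounds for D, prod, degr
#	UB = [400., None, 100.]         # upper bounds for D, prod, degr
#	x0 = [10., 0.1, 1.]             # initial guess in constrained space
#	x0u = transformX0(x0, LB, UB)   # unconstrained start values handed to Nelder-Mead
#	x = xTransform(x0u, LB, UB)     # maps back into [LB, UB]; recovers approximately x0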
def buildBoundLists(fit):
"""Builds list of lower bounds and upper bounds.
Args:
fit (pyfrp.subclasses.pyfrp_fit): Fit object.
Returns:
tuple: Tuple containing:
* LBs (list): List of lower bounds.
* UBs (list): List of upper bounds.
"""
LBs=[fit.LBD]+int(fit.fitProd)*[fit.LBProd]+int(fit.fitDegr)*[fit.LBDegr]+len(fit.ROIsFitted)*[fit.LBEqu]
UBs=[fit.UBD]+int(fit.fitProd)*[fit.UBProd]+int(fit.fitDegr)*[fit.UBDegr]+len(fit.ROIsFitted)*[fit.UBEqu]
	return LBs,UBs
| gpl-3.0 |
ryanraaum/african-mtdna | popdata_sources/coelho2009/process.py | 1 | 2502 | from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from string import translate
import pandas as pd
import sys
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
regionparts = metadata.ix[0,'SeqRange'].split(';')
region1 = range2region(regionparts[0])
region2 = range2region(regionparts[1])
with open('coelho2009_haplotypes.csv', 'rU') as f:
f.readline() # skip past header
data = f.readlines()
hids = []
hvr1sites = []
hvr2sites = []
for l in data:
parts = l.strip().split(',')
if int(parts[3]) == 377 and int(parts[7]) == 268:
hids.append(parts[0])
hvr1sites.append(parts[4])
hvr2sites.append(parts[8])
## need to preprocess sites data for some nonstandard notation in hvr2
hvr1 = []
hvr2 = []
for i in range(len(hids)):
s1 = str2sites(hvr1sites[i], add16k=True)
hvr1.append(s1)
s2 = hvr2sites[i].split()
s2new = []
for j in range(len(s2)):
if s2[j].endswith('.2C'):
parts = s2[j].split('.')
s2new.append('%s.1C' % parts[0])
s2new.append('%s.2C' % parts[0])
else:
s2new.append(s2[j])
s2 = str2sites(' '.join(s2new))
hvr2.append(s2)
newsites = []
for i in range(len(hvr1)):
newsites.append(hvr1[i] + hvr2[i])
## Validate
passed_validation = True
for i in range(len(newsites)):
curr_sites = newsites[i]
seq1 = translate(sites2seq(curr_sites, region1), None, '-')
seq2 = translate(sites2seq(curr_sites, region2), None, '-')
	mysites = seq2sites(seq1) + seq2sites(seq2)
if not mysites == curr_sites:
myseq1 = translate(sites2seq(mysites, region1), None, '-')
myseq2 = translate(sites2seq(mysites, region2), None, '-')
		if not (seq1 == myseq1 and seq2 == myseq2):
passed_validation = False
print i, hids[i]
if passed_validation:
counts = pd.read_csv('coelho2009_counts.csv', index_col=0)
counts = counts.fillna(0)
counter = [0] * 5
with open('processed.csv', 'w') as f:
for i in range(len(newsites)):
hid = hids[i]
curr_sites = newsites[i]
seq1 = translate(sites2seq(curr_sites, region1), None, '-')
seq2 = translate(sites2seq(curr_sites, region2), None, '-')
mysites = seq2sites(seq1) + seq2sites(seq2)
mysites = ' '.join([str(x) for x in mysites])
for j in range(len(metadata.index)):
prefix = metadata.ix[metadata.index[j],'NewPrefix']
for k in range(int(counts.ix[hid, metadata.index[j]])):
counter[j] += 1
num = str(counter[j]).zfill(3)
newid = prefix + num
				f.write('%s,%s,%s\n' % (newid, hid, mysites))
| cc0-1.0 |
koobonil/Boss2D | Boss2D/addon/_old/webrtc-qt5.11.2_for_boss/tools_webrtc/cpu/cpu_mon.py | 6 | 2057 | #!/usr/bin/env python
#
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import psutil
import sys
import numpy
from matplotlib import pyplot
class CpuSnapshot(object):
def __init__(self, label):
self.label = label
self.samples = []
def Capture(self, sample_count):
print ('Capturing %d CPU samples for %s...' %
((sample_count - len(self.samples)), self.label))
while len(self.samples) < sample_count:
self.samples.append(psutil.cpu_percent(1.0, False))
def Text(self):
return ('%s: avg=%s, median=%s, min=%s, max=%s' %
(self.label, numpy.average(self.samples),
numpy.median(self.samples),
numpy.min(self.samples), numpy.max(self.samples)))
def Max(self):
return numpy.max(self.samples)
def GrabCpuSamples(sample_count):
print 'Label for snapshot (enter to quit): '
label = raw_input().strip()
if len(label) == 0:
return None
snapshot = CpuSnapshot(label)
snapshot.Capture(sample_count)
return snapshot
def main():
print 'How many seconds to capture per snapshot (enter for 60)?'
sample_count = raw_input().strip()
if len(sample_count) > 0 and int(sample_count) > 0:
sample_count = int(sample_count)
else:
print 'Defaulting to 60 samples.'
sample_count = 60
snapshots = []
while True:
snapshot = GrabCpuSamples(sample_count)
    if snapshot is None:
break
snapshots.append(snapshot)
if len(snapshots) == 0:
print 'no samples captured'
return -1
pyplot.title('CPU usage')
for s in snapshots:
pyplot.plot(s.samples, label=s.Text(), linewidth=2)
pyplot.legend()
pyplot.show()
return 0
if __name__ == '__main__':
sys.exit(main())
| mit |
aemerick/galaxy_analysis | method_paper_plots/star_abundances.py | 1 | 26128 | from galaxy_analysis.plot.plot_styles import *
import matplotlib.pyplot as plt
import glob
import deepdish as dd
import yt
from galaxy_analysis.utilities import utilities
import numpy as np
from matplotlib.ticker import NullFormatter
from galaxy_analysis.particle_analysis.abundances import single_MDF
#
from galaxy_analysis.analysis import Galaxy
from mpl_toolkits.axes_grid1 import make_axes_locatable
import h5py
# grab the most recent file
workdir = '/mnt/ceph/users/emerick/enzo_runs/pleiades/starIC/run11_30km/final_sndriving/'
#workdir = '/home/emerick/work/enzo_runs/pleiades/starIC/run11_30km/final_sndriving/'
data_files = np.sort(glob.glob(workdir + 'DD????'))
name = data_files[-1].split('final_sndriving/')[1]
gal = Galaxy(name, wdir = workdir)
#
#
#
def plot_alpha_vs_fe():
fig,ax = plt.subplots()
fig.set_size_inches(8,7)
ptype = gal.df['particle_type']
fe_over_h = gal.df[('io','particle_Fe_over_H')]
alpha = gal.df[('io','particle_alpha_over_Fe')]
age = (gal.ds.current_time - gal.df[('io','creation_time')]).convert_to_units('Myr')
age = age - np.min(age)
p = ax.scatter(fe_over_h[ptype==11], alpha[ptype==11],
s = point_size, lw = 2, c = age[ptype==11], cmap = 'plasma_r', alpha = 0.75)
p.set_clim([0.0, np.max(age)])
cb = fig.colorbar(p)
cb.set_label(r'Stellar Age (Myr)')
ax.set_xlim(-9,-1)
ax.set_ylim(-1.75,1.75)
ax.set_xlabel(r'[Fe/H]')
ax.set_ylabel(r'[$\rm \alpha$/Fe]')
plt.minorticks_on()
plt.tight_layout()
fig.savefig('alpha_over_fe.png')
plt.close()
return
def plot_alpha_vs_fe_movie():
times = np.arange(0, 245, 1)
for i, t in enumerate(times):
plot_alpha_vs_fe_with_histograms(t_f = t, image_num = i)
def plot_alpha_vs_fe_with_histograms(t_f = None, image_num = 0):
sep = 0.02
left, width = 0.125, 0.65
bottom, height = 0.1, 0.65
left_h = left + width + sep
bottom_h = bottom + height + sep
rect_scatter = [left,bottom,width,height]
# rect_colorbar =
# rect_histx = [left, bottom_h, width, 0.95 - bottom_h - (left-bottom)]
# rect_histy = [left_h, bottom, 0.95 - left_h, height]
# fig,ax = plt.subplots()
fig = plt.figure(1, figsize=(8,8))
# fig.set_size_inches(8,8)
ax_scatter = plt.axes(rect_scatter)
# ax_hist_x = plt.axes(rect_histx)
# ax_hist_y = plt.axes(rect_histy)
# ax_color = plt.axes(rect_colorbar)
ptype = gal.df['particle_type']
fe_over_h = gal.df[('io','particle_Fe_over_H')]
alpha = gal.df[('io','particle_alpha_over_Fe')]
creation_time = gal.df[('io','creation_time')].convert_to_units('Myr')
age = (gal.ds.current_time - creation_time)
if t_f is None: # plot normally all MS stars
age = age - np.min(age)
# scatter plot
p = ax_scatter.scatter(fe_over_h[ptype==11], alpha[ptype==11],
s = point_size, lw = 2, c = age[ptype==11], cmap = 'plasma_r', alpha = 0.75)
p.set_clim([0.0, np.max(age)])
else:
min_clim = 0.0
max_clim = np.max( age - np.min(age))
particle_lifetimes = gal.df[('io','particle_model_lifetime')].convert_to_units('Myr')
selection = (t_f >= creation_time) * ( t_f < creation_time + particle_lifetimes)
age = t_f - creation_time
if np.size(fe_over_h[selection]) < 1:
            plot_fe_over_h = np.ones(np.size(fe_over_h))*(-10000) # make dummy values so plot still displays, but is empty
plot_alpha = np.ones(np.size(alpha))*(-10000)
plot_age = np.ones(np.size(age))*(-10000)
else:
plot_fe_over_h = fe_over_h[selection]
plot_alpha = alpha[selection]
plot_age = age[selection]
p = ax_scatter.scatter(plot_fe_over_h, plot_alpha, s = point_size, lw = 2,
c = plot_age, cmap = 'plasma_r', alpha = 0.75)
p.set_clim([min_clim,max_clim])
cb = fig.colorbar(p, ax = ax_scatter, orientation = 'horizontal', pad = 0.125, fraction = 0.046,
aspect = 40)
cb.set_label(r'Stellar Age (Myr)')
#
#
ax_scatter.set_xlim(-9,-1)
ax_scatter.set_ylim(-1.75,1.75)
ax_scatter.tick_params(axis='x',which='minor',bottom='on')
ax_scatter.tick_params(axis='y',which='minor',bottom='on')
ax_scatter.set_xlabel(r'[Fe/H]')
ax_scatter.set_ylabel(r'[$\rm \alpha$/Fe]')
plt.minorticks_on()
ax_scatter.plot( ax_scatter.get_xlim(), [0.0,0.0], lw = line_width, color = 'black', ls = '--')
#
# find main plot and construct histograms
#
divider = make_axes_locatable(ax_scatter)
left, bottom, width, height = divider.get_position()
# width, height = divider.get_horizontal(), divider.get_vertical()
sep = 0.01
thickness = np.min( np.array([0.95 - left - width - sep, 0.95 - bottom - height - sep]))
rect_histx = [left, bottom + height + sep, width, thickness]
rect_histy = [left + width + sep, bottom, thickness, height]
ax_hist_x = plt.axes(rect_histx)
ax_hist_y = plt.axes(rect_histy)
nbins = 100
hist,bins = np.histogram(fe_over_h, bins = nbins)
weights = np.ones(np.size(fe_over_h)) * (1.0 / (1.0*np.max(hist)))
ax_hist_x.hist(fe_over_h, color = 'C0', bins = nbins, weights = weights)
if not (t_f is None):
if np.max(plot_fe_over_h) > -1000:
hist,bins = np.histogram(plot_fe_over_h, bins = nbins)
weights = np.ones(np.size(plot_fe_over_h)) * (1.0 / (1.0*np.max(hist)))
ax_hist_x.hist(plot_fe_over_h, color = 'black', bins = nbins, weights = weights,
histtype = 'step', lw = 2.0)
# plot_histogram(ax_hist_x, bins, hist / (1.0*np.max(hist)), color = 'black')
plt.minorticks_on()
# hist,bins = np.histogram(alpha, bins = 24)
# plot_histogram(ax_hist_y, bins, hist / (1.0*np.max(hist)), color = 'black', orientation = 'horizontal')
nbins = 50
hist,bins = np.histogram(alpha, bins = nbins)
weights = np.ones(np.size(fe_over_h)) * (1.0 / (1.0*np.max(hist)))
ax_hist_y.hist(alpha, orientation='horizontal', color = 'C0', bins = nbins, weights = weights)
if not (t_f is None):
if np.max(plot_alpha) > -1000:
hist,bins = np.histogram(plot_alpha, bins = nbins)
weights = np.ones(np.size(plot_alpha)) * (1.0 / (1.0*np.max(hist)))
ax_hist_y.hist(plot_alpha, orientation = 'horizontal', color = 'black', bins = nbins,
weights = weights, histtype='step', lw = 2.0)
ax_hist_x.xaxis.set_major_formatter(NullFormatter())
ax_hist_y.yaxis.set_major_formatter(NullFormatter())
ax_hist_x.set_xlim(ax_scatter.get_xlim())
ax_hist_y.set_ylim(ax_scatter.get_ylim())
ticks = [0.0,0.25,0.5,0.75,1.0]
ax_hist_x.set_yticks(ticks)
ax_hist_y.set_xticks(ticks)
ax_hist_y.set_xticklabels(ticks, rotation = 270)
plt.minorticks_on()
# plt.tight_layout()
if t_f is None:
fig.savefig('alpha_over_fe_hist.png')
else:
fig.savefig('alpha_movie/alpha_over_fe_hist_%0004i.png'%(image_num))
plt.close()
return
def plot_panel(A = 'Fe', B = 'Fe', C = 'H', color = True):
"""
Make panel plots of X/A vs. B/C where "X" is a loop through all elements available,
    and A, B, C are fixed for all plots, chosen by the user. Default will plot
[X/Fe] vs. [Fe/H]. Default behavior is to color points by age.
"""
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
elements = elements + ['alpha']
age = data['Time'] - data['creation_time'] # age of all particles in this data set
for base in ['H','Fe']:
fig, ax = plt.subplots(4,4, sharex = True, sharey = True)
fig.set_size_inches(4*4,4*4)
fig.subplots_adjust(hspace=0.0, wspace = 0.0)
if base == 'Fe':
bins = np.arange(-3,3.1,0.1)
else:
bins = np.arange(-9,0,0.1)
i,j = 0,0
for e in elements:
if (A == e): # skip
continue
index = (i,j)
y = np.array(data['abundances'][e][A])
x = np.array(data['abundances'][B][C])
p = ax[index].scatter(x, y, s = point_size*0.5,
lw = 2, c = age, cmap = 'plasma_r', alpha = 0.75)
p.set_clim([0.0, np.max(age)])
xy = (0.8,0.8)
ax[index].annotate(e, xy=xy, xytext=xy, xycoords = 'axes fraction',
textcoords = 'axes fraction')
# cb = fig.colorbar(p)
# cb.set_label(r'Stellar Age (Myr)')
j = j + 1
if j >= 4:
j = 0
i = i + 1
for i in np.arange(4):
ax[(3,i)].set_xlabel(r'log([' + B + '/' + C + '])')
ax[(i,0)].set_ylabel(r'log([X/' + A + '])')
if C == 'H':
ax[(i,0)].set_xlim(-10.25, 0.125)
else:
ax[(i,0)].set_xlim(-3.25, 3.25)
if A == 'H':
ax[(0,i)].set_ylim(-10.25, 0.125)
else:
ax[(0,i)].set_ylim(-3.25, 3.25)
for j in np.arange(4):
ax[(j,i)].plot([-10,10], [0.0,0.0], lw = 0.5 * line_width, ls = ':', color = 'black')
plt.minorticks_on()
fig.savefig('X_over_' + A +'_vs_' + B + '_over_' + C + '_panel.png')
plt.close()
return
def plot_spatial_profiles(field = 'metallicity', abundance = False,
bins = None, spatial_type = 'cylindrical_radius'):
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
elements = elements + ['alpha']
if spatial_type == 'cylindrical_radius':
bin_field = np.sqrt(data['kinematics']['x']**2 + data['kinematics']['y']**2)
xlabel = r'Radius (pc)'
elif spatial_type == 'z':
bin_field = np.abs( data['kinematics']['z'] )
xlabel = r'Z (pc)'
if bins is None:
bins = np.linspace(np.floor(np.min(bin_field)), np.ceil(np.max(bin_field)), 100)
centers = 0.5 * (bins[1:] + bins[:-1])
nbins = np.size(bins)
hist_index = np.digitize(bin_field, bins = bins)
median, q1, q3 = np.zeros(nbins-1), np.zeros(nbins-1), np.zeros(nbins-1)
if field == 'metallicity':
# make a single plot
# bin the data
for i in np.arange(nbins-1):
x = data['metallicity'][hist_index == i + 1]
median[i] = np.median(x)
if np.size(x) > 1:
q1[i] = np.percentile(x, 25.0)
q3[i] = np.percentile(x, 75.0)
elif np.size(x) == 1:
q1[i] = median[i]
q3[i] = median[i]
# now plot
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
plot_histogram(ax, bins, median, lw = line_width, color = 'black', ls = '-')
ax.fill_between(centers, q1, q3, lw = 1.5, color = 'grey')
ax.set_ylabel(r'Metallicity Fraction')
ax.set_xlabel(xlabel)
ax.set_xlim( np.min(bins), np.max(bins))
plt.tight_layout()
plt.minorticks_on()
fig.savefig('metallicity_' + spatial_type + '_profile.png')
plt.close()
elif abundance:
fig, ax = plt.subplots(4,4, sharex = True, sharey = True)
fig.set_size_inches(16,16)
fig.subplots_adjust(hspace = 0.0, wspace = 0.0)
axi, axj = 0,0
for e in elements:
if field == e:
continue
index = (axi,axj)
for i in np.arange(nbins-1):
x = np.array(data['abundances'][e][field])
x = x[ hist_index == (i + 1)]
if np.size(x) > 0:
median[i] = np.median(x)
q1[i] = np.percentile(x, 25)
q3[i] = np.percentile(x, 75)
else:
median[i] = None; q1[i] = None; q3[i] = None
ax[index].annotate(e, xy=(0.8,0.8),xytext=(0.8,0.8),
xycoords='axes fraction',textcoords = 'axes fraction')
plot_histogram(ax[index], bins, median, lw = line_width, color = 'black', ls = '-')
ax[index].fill_between(centers,q1,q3,lw=1.5,color='grey')
axj = axj+1
if axj>=4:
axj = 0
axi = axi + 1
for i in np.arange(4):
ax[(3,i)].set_xlabel(xlabel)
ax[(i,0)].set_ylabel(r'log[X/' + field +'])')
if field == 'H':
ax[(0,i)].set_ylim(-10.25,0.125)
else:
ax[(0,i)].set_ylim(-3.25,3.25)
for j in np.arange(4):
ax[(j,i)].plot([bins[0],bins[-1]], [0.0,0.0], lw = 0.5 * line_width, ls = '--',color ='black')
ax[(i,0)].set_xlim(np.min(bins), np.max(bins))
plt.minorticks_on()
fig.savefig(field + '_' + spatial_type + '_profile_panel.png')
plt.close()
return
def plot_MDF(plot_base = ['H','Fe']):
"""
    Make a panel plot of the metallicity distribution functions (MDFs) of all
    elemental abundances with respect to the chosen base element(s) (H and/or Fe)
"""
if (not (type(plot_base) is list)):
plot_base = [plot_base]
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
elements = elements + ['alpha']
for base in plot_base:
fig, ax = plt.subplots(4,4, sharex = True, sharey = True)
fig.set_size_inches(4*4,4*4)
fig.subplots_adjust(hspace=0.0, wspace = 0.0)
if base == 'Fe':
bins = np.arange(-3,3.1,0.1)
else:
bins = np.arange(-9,0,0.1)
i,j = 0,0
for e in elements:
if (base == e):
continue
index = (i,j)
points = np.array(data['abundances'][e][base])
single_MDF(points, bins = bins, norm = 'peak', ax = ax[index],
label = False, lw = line_width)
x = np.max(bins) - (0.25/6.0 * (bins[-1] - bins[0]))
y = 0.9
ax[index].annotate(e, xy = (x,y), xytext =(x,y))
ax[index].plot([0,0], [0.0,1.0], ls = ':', lw = 0.5 * line_width, color = 'black')
j = j + 1
if j >= 4:
j = 0
i = i + 1
for i in np.arange(4):
ax[(3,i)].set_xlabel(r'log([X/' + base + '])')
ax[(i,0)].set_ylabel(r'N/N$_{\rm peak}$')
if base == 'H':
ax[(i,0)].set_xlim(-10.25, 0.125)
elif base == 'Fe':
ax[(i,0)].set_xlim(-3.25, 3.25)
plt.minorticks_on()
fig.savefig(base + '_MDF.png')
plt.close()
return
def plot_time_evolution():
"""
Make a panel plot of the time evolution of all elemental abundance ratios
with respect to both H and Fe (as separate plots)
"""
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
elements = elements + ['alpha']
for time_type in ['cumulative','10Myr']:
for base in ['H','Fe']:
fig, ax = plt.subplots(4,4, sharex = True, sharey = True)
fig.set_size_inches(4*4,4*4)
fig.subplots_adjust(hspace=0.0, wspace = 0.0)
i,j = 0,0
for e in elements:
if (base == e):
continue
print("plotting " + e + "/" + base + " time evolution")
index = (i,j)
t = data['statistics'][time_type]['bins']
y = data['statistics'][time_type][e][base]['median']
Q1 = data['statistics'][time_type][e][base]['Q1']
Q3 = data['statistics'][time_type][e][base]['Q3']
select = (y*0 == 0) # remove nan values
t = t[select]
t = t - t[0]
ax[index].plot( t, y[select], lw = line_width, ls = '-', color = 'black', label = r' ' + e +' ')
ax[index].fill_between(t, Q1[select], Q3[select], color = 'black', alpha = 0.5, lw = 0.5 * line_width)
ax[index].set_xlim(0.0, np.max(t))
ax[index].plot( [0.0,1000.0], [0.0,0.0], ls = ':', color = 'black', lw = line_width)
ax[index].legend(loc = 'upper right')
j = j + 1
if j >= 4:
j = 0
i = i + 1
for i in np.arange(4):
ax[(3,i)].set_xlabel(r'Time (Myr)')
ax[(i,0)].set_ylabel(r'[X/' + base +']')
if base == 'H':
ax[(i,0)].set_ylim(-12.25, 0.125)
elif base == 'Fe':
ax[(i,0)].set_ylim(-3.25, 3.25)
# for j in np.arange(3):
# ax[(j,i)].set_xticklabels([])
# ax[(i,j+1)].set_yticklabels([])
# ax[(3,i)].set_xticklabels(np.arange(0,np.max(t)+20,20))
# if base == 'Fe':
# ax[(i,0)].set_yticklabels([-3,-2,-1,0,1,2,3,])
# else:
# ax[(i,0)].set_yticklabels([-12, -10, -8, -6, -4, -2, 0])
plt.minorticks_on()
fig.savefig('stellar_x_over_' + base + '_' + time_type +'_evolution.png')
plt.close()
return
def plot_mass_fraction_time_evolution():
"""
    Make a panel plot of the time evolution of the stellar mass fractions of
    all elements tracked in the simulation
"""
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
# elements = elements + ['alpha']
for time_type in ['cumulative','10Myr']:
fig, ax = plt.subplots(4,4, sharex = True, sharey = True)
fig.set_size_inches(4*4,4*4)
fig.subplots_adjust(hspace=0.0, wspace = 0.0)
i,j = 0,0
for e in elements:
print("plotting " + e + "mass fraction time evolution")
index = (i,j)
t = data['mass_fraction_statistics'][time_type]['bins']
y = data['mass_fraction_statistics'][time_type][e]['median']
Q1 = data['mass_fraction_statistics'][time_type][e]['Q1']
Q3 = data['mass_fraction_statistics'][time_type][e]['Q3']
select = (y*0 == 0) # remove nan values
t = t[select]
t = t - t[0]
ax[index].plot( t, y[select], lw = line_width, ls = '-', color = 'black', label = r' ' + e +' ')
ax[index].fill_between(t, Q1[select], Q3[select], color = 'black', alpha = 0.5, lw = 0.5 * line_width)
ax[index].set_xlim(0.0, np.max(t))
ax[index].plot( [0.0,1000.0], [0.0,0.0], ls = ':', color = 'black', lw = line_width)
ax[index].legend(loc = 'upper right')
j = j + 1
if j >= 4:
j = 0
i = i + 1
for i in np.arange(4):
ax[(3,i)].set_xlabel(r'Time (Myr)')
ax[(i,0)].set_ylabel(r'log(X Mass Fraction)')
ax[(i,0)].set_ylim(1.0E-10, 1.0E-4)
ax[(i,0)].semilogy()
# for j in np.arange(3):
# ax[(j,i)].set_xticklabels([])
# ax[(i,j+1)].set_yticklabels([])
# ax[(3,i)].set_xticklabels(np.arange(0,np.max(t)+20,20))
# if base == 'Fe':
# ax[(i,0)].set_yticklabels([-3,-2,-1,0,1,2,3,])
# else:
# ax[(i,0)].set_yticklabels([-12, -10, -8, -6, -4, -2, 0])
plt.minorticks_on()
fig.savefig('stellar_mass_fraction_' + time_type +'_evolution.png')
plt.close()
return
def plot_ratios_with_histograms(X='alpha',A='Fe',B='Fe',C='H'):
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
elements = elements + ['alpha'] + ['H']
age = data['Time'] - data['creation_time'] # age of all particles in this data set
# --------------------
check_elements = [x for x in [X,A,B,C] if (not (x in elements))]
if len(check_elements) > 0:
print(check_elements, " not in elements list")
print("available: ", elements)
raise ValueError
sep = 0.02
left, width = 0.125, 0.65
bottom, height = 0.1, 0.65
left_h = left + width + sep
bottom_h = bottom + height + sep
rect_scatter = [left,bottom,width,height]
# rect_colorbar =
# rect_histx = [left, bottom_h, width, 0.95 - bottom_h - (left-bottom)]
# rect_histy = [left_h, bottom, 0.95 - left_h, height]
# fig,ax = plt.subplots()
fig = plt.figure(1, figsize=(8,8))
# fig.set_size_inches(8,8)
ax_scatter = plt.axes(rect_scatter)
# ax_hist_x = plt.axes(rect_histx)
# ax_hist_y = plt.axes(rect_histy)
# ax_color = plt.axes(rect_colorbar)
x_values = data['abundances'][B][C]
y_values = data['abundances'][X][A]
age = age - np.min(age) # normalize
# scatter plot
p = ax_scatter.scatter(x_values, y_values,
s = point_size, lw = 2, c = age, cmap = 'plasma_r', alpha = 0.75)
p.set_clim([0.0, np.max(age)])
cb = fig.colorbar(p, ax = ax_scatter, orientation = 'horizontal', pad = 0.125, fraction = 0.046,
aspect = 40)
cb.set_label(r'Stellar Age (Myr)')
#
#
#
ax_scatter.set_xlim(-9,-1)
ax_scatter.set_ylim(-1.75,1.75)
ax_scatter.tick_params(axis='x',which='minor',bottom='on')
ax_scatter.tick_params(axis='y',which='minor',bottom='on')
ax_scatter.set_xlabel(r'log([' + B + '/' + C + '])')
ax_scatter.set_ylabel(r'log([' + X + '/' + A + '])')
plt.minorticks_on()
#
# find main plot and construct histograms
#
divider = make_axes_locatable(ax_scatter)
left, bottom, width, height = divider.get_position()
# width, height = divider.get_horizontal(), divider.get_vertical()
sep = 0.01
thickness = np.min( np.array([0.95 - left - width - sep, 0.95 - bottom - height - sep]))
rect_histx = [left, bottom + height + sep, width, thickness]
rect_histy = [left + width + sep, bottom, thickness, height]
ax_hist_x = plt.axes(rect_histx)
ax_hist_y = plt.axes(rect_histy)
# construct the histogram for the horizontal axis (goes up top)
nbins = 100
hist,bins = np.histogram(x_values, bins = nbins)
weights = np.ones(np.size(x_values)) * (1.0 / (1.0*np.max(hist)))
ax_hist_x.hist(x_values, color = 'C0', bins = nbins, weights = weights)
# plot_histogram(ax_hist_x, bins, hist / (1.0*np.max(hist)), color = 'black')
plt.minorticks_on()
# hist,bins = np.histogram(alpha, bins = 24)
# plot_histogram(ax_hist_y, bins, hist / (1.0*np.max(hist)), color = 'black', orientation = 'horizontal')
# now do the same for the vertical axis histogram
nbins = 50
hist,bins = np.histogram(y_values, bins = nbins)
weights = np.ones(np.size(y_values)) * (1.0 / (1.0*np.max(hist)))
ax_hist_y.hist(y_values, orientation='horizontal', color = 'C0', bins = nbins, weights = weights)
ax_hist_x.xaxis.set_major_formatter(NullFormatter())
ax_hist_y.yaxis.set_major_formatter(NullFormatter())
ax_hist_x.set_xlim(ax_scatter.get_xlim())
ax_hist_y.set_ylim(ax_scatter.get_ylim())
ticks = [0.0,0.25,0.5,0.75,1.0]
ax_hist_x.set_yticks(ticks)
ax_hist_y.set_xticks(ticks)
ax_hist_y.set_xticklabels(ticks, rotation = 270)
plt.minorticks_on()
# plt.tight_layout()
fig.savefig(X + '_over_' + A + '_vs_' + B + '_over_' + C + '_hist.png')
plt.close()
return
if __name__ == '__main__':
plot_mass_fraction_time_evolution() #
# plot_ratios_with_histograms('C','O','Fe','H') # C/O vs Fe/H
# plot_ratios_with_histograms('alpha','Mg','Mg','H')
# plot_ratios_with_histograms('alpha','Fe','Fe','H')
# plot_panel() # default [X/Fe] vs [Fe/H]
# plot_panel(A = 'Mg', B = 'Fe', C = 'H')
# plot_panel(A = 'Mg', B = 'Mg', C = 'Fe')
# plot_panel(A = 'O', B = 'Fe', C = 'H')
# plot_panel(A = 'O', B = 'O', C = 'Fe')
# plot_panel(A = 'Ba', B = 'Ba', C = 'Fe')
# plot_MDF(plot_base = ['H','Fe','O','Ba'])
# plot_time_evolution()
# plot_alpha_vs_fe_with_histograms()
# plot_alpha_vs_fe()
# plot_alpha_vs_fe_movie()
# plot_spatial_profiles(bins=np.arange(0,505,10))
# plot_spatial_profiles(field = 'Fe',abundance=True, bins = np.arange(0,505,10))
# plot_spatial_profiles(field = 'H', abundance=True, bins = np.arange(0,505,10))
| mit |
sernst/cauldron | cauldron/session/display/__init__.py | 1 | 23013 | import json as _json_io
import textwrap
import typing
from datetime import timedelta
import cauldron as _cd
from cauldron import environ
from cauldron import render
from cauldron.render import plots as render_plots
from cauldron.render import texts as render_texts
from cauldron.session import report
def _get_report() -> 'report.Report':
"""Fetches the report associated with the currently running step."""
return _cd.project.get_internal_project().current_step.report
def inspect(source: dict):
"""
Inspects the data and structure of the source dictionary object and
adds the results to the display for viewing.
:param source:
A dictionary object to be inspected.
:return:
"""
r = _get_report()
r.append_body(render.inspect(source))
def header(header_text: str, level: int = 1, expand_full: bool = False):
"""
Adds a text header to the display with the specified level.
:param header_text:
The text to display in the header.
:param level:
The level of the header, which corresponds to the html header
levels, such as <h1>, <h2>, ...
:param expand_full:
Whether or not the header will expand to fill the width of the entire
notebook page, or be constrained by automatic maximum page width. The
default value of False lines the header up with text displays.
"""
r = _get_report()
r.append_body(render.header(
header_text,
level=level,
expand_full=expand_full
))
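# Illustrative usage sketch (editorial addition, not part of this module): inside a
# Cauldron step the display functions are normally reached through the top-level
# package; the exact alias below is an assumption for demonstration.
#
#   import cauldron as cd
#   cd.display.header('Results Overview', level=2)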
def text(value: str, preformatted: bool = False):
"""
Adds text to the display. If the text is not preformatted, it will be
displayed in paragraph format. Preformatted text will be displayed
inside a pre tag with a monospace font.
:param value:
The text to display.
:param preformatted:
Whether or not to preserve the whitespace display of the text.
"""
if preformatted:
result = render_texts.preformatted_text(value)
else:
result = render_texts.text(value)
r = _get_report()
r.append_body(result)
r.stdout_interceptor.write_source(
'{}\n'.format(textwrap.dedent(value))
)
def markdown(
source: str = None,
source_path: str = None,
preserve_lines: bool = False,
font_size: float = None,
**kwargs
):
"""
Renders the specified source string or source file using markdown and
adds the resulting HTML to the notebook display.
:param source:
A markdown formatted string.
:param source_path:
A file containing markdown text.
:param preserve_lines:
If True, all line breaks will be treated as hard breaks. Use this
for pre-formatted markdown text where newlines should be retained
during rendering.
:param font_size:
Specifies a relative font size adjustment. The default value is 1.0,
which preserves the inherited font size values. Set it to a value
below 1.0 for smaller font-size rendering and greater than 1.0 for
larger font size rendering.
:param kwargs:
Any variable replacements to make within the string using Jinja2
templating syntax.
"""
r = _get_report()
result = render_texts.markdown(
source=source,
source_path=source_path,
preserve_lines=preserve_lines,
font_size=font_size,
**kwargs
)
r.library_includes += result['library_includes']
r.append_body(result['body'])
r.stdout_interceptor.write_source(
'{}\n'.format(textwrap.dedent(result['rendered']))
)
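# Illustrative usage sketch (editorial addition, not part of this module): rendering an
# inline markdown string with a Jinja2 replacement; the `title` variable is made up.
#
#   import cauldron as cd
#   cd.display.markdown('## {{ title }}', title='Trial Run')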
def json(**kwargs):
"""
    Adds the specified data to the output display window with the
specified key. This allows the user to make available arbitrary
JSON-compatible data to the display for runtime use.
:param kwargs:
Each keyword argument is added to the CD.data object with the
specified key and value.
"""
r = _get_report()
r.append_body(render.json(**kwargs))
r.stdout_interceptor.write_source(
'{}\n'.format(_json_io.dumps(kwargs, indent=2))
)
def plotly(
data: typing.Union[dict, list, typing.Any] = None,
layout: typing.Union[dict, typing.Any] = None,
scale: float = 0.5,
figure: typing.Union[dict, typing.Any] = None,
static: bool = False
):
"""
Creates a Plotly plot in the display with the specified data and
layout.
:param data:
The Plotly trace data to be plotted.
:param layout:
The layout data used for the plot.
:param scale:
The display scale with units of fractional screen height. A value
of 0.5 constrains the output to a maximum height equal to half the
height of browser window when viewed. Values below 1.0 are usually
recommended so the entire output can be viewed without scrolling.
:param figure:
In cases where you need to create a figure instead of separate data
and layout information, you can pass the figure here and leave the
data and layout values as None.
:param static:
If true, the plot will be created without interactivity.
This is useful if you have a lot of plots in your notebook.
"""
r = _get_report()
if not figure and not isinstance(data, (list, tuple)):
data = [data]
if 'plotly' not in r.library_includes:
r.library_includes.append('plotly')
r.append_body(render.plotly(
data=data,
layout=layout,
scale=scale,
figure=figure,
static=static
))
r.stdout_interceptor.write_source('[ADDED] Plotly plot\n')
def table(
data_frame,
scale: float = 0.7,
include_index: bool = False,
max_rows: int = 500,
sample_rows: typing.Optional[int] = None,
formats: typing.Union[
str,
typing.Callable[[typing.Any], str],
typing.Dict[
str,
typing.Union[str, typing.Callable[[typing.Any], str]]
]
] = None
):
"""
Adds the specified data frame to the display in a nicely formatted
scrolling table.
:param data_frame:
The pandas data frame to be rendered to a table.
:param scale:
The display scale with units of fractional screen height. A value
of 0.5 constrains the output to a maximum height equal to half the
        height of the browser window when viewed. Values below 1.0 are usually
recommended so the entire output can be viewed without scrolling.
:param include_index:
Whether or not the index column should be included in the displayed
output. The index column is not included by default because it is
often unnecessary extra information in the display of the data.
:param max_rows:
This argument exists to prevent accidentally writing very large data
frames to a table, which can cause the notebook display to become
sluggish or unresponsive. If you want to display large tables, you need
only increase the value of this argument.
:param sample_rows:
When set to a positive integer value, the DataFrame will be randomly
sampled to the specified number of rows when displayed in the table.
If the value here is larger than the number of rows in the DataFrame,
the sampling will have no effect and the entire DataFrame will be
displayed instead.
:param formats:
An optional dictionary that, when specified, should contain a mapping
between column names and formatting strings to apply to that column
for display purposes. For example, ``{'foo': '{:,.2f}%'}`` would
transform a column ``foo = [12.2121, 34.987123, 42.72839]`` to
display as ``foo = [12.21%, 34.99%, 42.73%]``. The formatters should
        follow the standard Python string formatting guidelines, as used by
        the ``str.format()`` method, with the value of the column as the only
        positional argument in the format call. A string value can also
be specified for uniform formatting of all columns (or if displaying
a series with only a single value).
"""
r = _get_report()
r.append_body(render.table(
data_frame=data_frame,
scale=scale,
include_index=include_index,
max_rows=max_rows,
sample_rows=sample_rows,
formats=formats
))
r.stdout_interceptor.write_source('[ADDED] Table\n')
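# A minimal usage sketch for the table display above (hypothetical DataFrame
# and alias; assumes this module is exposed as ``cauldron.display``):
#
#     import cauldron as cd
#     import pandas as pd
#
#     df = pd.DataFrame({'name': ['a', 'b'], 'share': [12.3456, 87.6544]})
#     cd.display.table(df, scale=0.5, formats={'share': '{:,.2f}%'})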
def svg(svg_dom: str, filename: str = None):
"""
Adds the specified SVG string to the display. If a filename is
included, the SVG data will also be saved to that filename within the
project results folder.
:param svg_dom:
The SVG string data to add to the display.
:param filename:
An optional filename where the SVG data should be saved within
the project results folder.
"""
r = _get_report()
r.append_body(render.svg(svg_dom))
r.stdout_interceptor.write_source('[ADDED] SVG\n')
if not filename:
return
if not filename.endswith('.svg'):
filename += '.svg'
r.files[filename] = svg_dom
def jinja(path: str, **kwargs):
"""
Renders the specified Jinja2 template to HTML and adds the output to the
display.
:param path:
The fully-qualified path to the template to be rendered.
:param kwargs:
Any keyword arguments that will be use as variable replacements within
the template.
"""
r = _get_report()
r.append_body(render.jinja(path, **kwargs))
r.stdout_interceptor.write_source('[ADDED] Jinja2 rendered HTML\n')
def whitespace(lines: float = 1.0):
"""
Adds the specified number of lines of whitespace.
:param lines:
The number of lines of whitespace to show.
"""
r = _get_report()
r.append_body(render.whitespace(lines))
r.stdout_interceptor.write_source('\n')
def image(
filename: str,
width: int = None,
height: int = None,
justify: str = 'left'
):
"""
Adds an image to the display. The image must be located within the
assets directory of the Cauldron notebook's folder.
:param filename:
        Name of the file within the assets directory.
:param width:
Optional width in pixels for the image.
:param height:
Optional height in pixels for the image.
:param justify:
One of 'left', 'center' or 'right', which specifies how the image
is horizontally justified within the notebook display.
"""
r = _get_report()
path = '/'.join(['reports', r.project.uuid, 'latest', 'assets', filename])
r.append_body(render.image(path, width, height, justify))
r.stdout_interceptor.write_source('[ADDED] Image\n')
def html(dom: str):
"""
    Adds the specified HTML snippet to the notebook display.
:param dom:
The HTML string to add to the display.
"""
r = _get_report()
r.append_body(render.html(dom))
r.stdout_interceptor.write_source('[ADDED] HTML\n')
def workspace(show_values: bool = True, show_types: bool = True):
"""
Adds a list of the shared variables currently stored in the project
workspace.
:param show_values:
When true the values for each variable will be shown in addition to
their name.
:param show_types:
When true the data types for each shared variable will be shown in
addition to their name.
"""
r = _get_report()
data = {}
for key, value in r.project.shared.fetch(None).items():
if key.startswith('__cauldron_'):
continue
data[key] = value
r.append_body(render.status(data, values=show_values, types=show_types))
def pyplot(
figure=None,
scale: float = 0.8,
clear: bool = True,
aspect_ratio: typing.Union[list, tuple] = None
):
"""
Creates a matplotlib plot in the display for the specified figure. The size
of the plot is determined automatically to best fit the notebook.
:param figure:
The matplotlib figure to plot. If omitted, the currently active
figure will be used.
:param scale:
The display scale with units of fractional screen height. A value
of 0.5 constrains the output to a maximum height equal to half the
        height of the browser window when viewed. Values below 1.0 are usually
recommended so the entire output can be viewed without scrolling.
:param clear:
Clears the figure after it has been rendered. This is useful to
prevent persisting old plot data between repeated runs of the
project files. This can be disabled if the plot is going to be
used later in the project files.
:param aspect_ratio:
The aspect ratio for the displayed plot as a two-element list or
tuple. The first element is the width and the second element the
height. The units are "inches," which is an important consideration
for the display of text within the figure. If no aspect ratio is
specified, the currently assigned values to the plot will be used
instead.
"""
r = _get_report()
r.append_body(render_plots.pyplot(
figure,
scale=scale,
clear=clear,
aspect_ratio=aspect_ratio
))
r.stdout_interceptor.write_source('[ADDED] PyPlot plot\n')
def bokeh(model, scale: float = 0.7, responsive: bool = True):
"""
Adds a Bokeh plot object to the notebook display.
:param model:
The plot object to be added to the notebook display.
:param scale:
How tall the plot should be in the notebook as a fraction of screen
height. A number between 0.1 and 1.0. The default value is 0.7.
:param responsive:
Whether or not the plot should responsively scale to fill the width
of the notebook. The default is True.
"""
r = _get_report()
if 'bokeh' not in r.library_includes:
r.library_includes.append('bokeh')
r.append_body(render_plots.bokeh_plot(
model=model,
scale=scale,
responsive=responsive
))
r.stdout_interceptor.write_source('[ADDED] Bokeh plot\n')
def listing(
source: list,
ordered: bool = False,
expand_full: bool = False
):
"""
An unordered or ordered list of the specified *source* iterable where
each element is converted to a string representation for display.
:param source:
The iterable to display as a list.
:param ordered:
Whether or not the list should be ordered. If False, which is the
default, an unordered bulleted list is created.
:param expand_full:
Whether or not the list should expand to fill the screen horizontally.
When defaulted to False, the list is constrained to the center view
area of the screen along with other text. This can be useful to keep
lists aligned with the text flow.
"""
r = _get_report()
r.append_body(render.listing(
source=source,
ordered=ordered,
expand_full=expand_full
))
r.stdout_interceptor.write_source('[ADDED] Listing\n')
def list_grid(
source: list,
expand_full: bool = False,
column_count: int = 2,
row_spacing: float = 1.0
):
"""
    A multi-column list of the specified *source* iterable where
each element is converted to a string representation for display.
:param source:
The iterable to display as a list.
:param expand_full:
Whether or not the list should expand to fill the screen horizontally.
When defaulted to False, the list is constrained to the center view
area of the screen along with other text. This can be useful to keep
lists aligned with the text flow.
:param column_count:
The number of columns to display. The specified count is applicable to
        high-definition screens. For lower-definition screens the actual count
displayed may be fewer as the layout responds to less available
horizontal screen space.
:param row_spacing:
The number of lines of whitespace to include between each row in the
grid. Set this to 0 for tightly displayed lists.
"""
r = _get_report()
r.append_body(render.list_grid(
source=source,
expand_full=expand_full,
column_count=column_count,
row_spacing=row_spacing
))
r.stdout_interceptor.write_source('[ADDED] List grid\n')
def latex(source: str):
"""
Add a mathematical equation in latex math-mode syntax to the display.
    The @ character is used in place of the traditional backslash escape
    character to prevent backslash conflicts with Python strings. For
example, \\delta would be @delta.
:param source:
The string representing the latex equation to be rendered.
"""
r = _get_report()
if 'katex' not in r.library_includes:
r.library_includes.append('katex')
r.append_body(render_texts.latex(source.replace('@', '\\')))
r.stdout_interceptor.write_source('[ADDED] Latex equation\n')
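# A minimal usage sketch for the latex display above, showing the @ escape
# convention (hypothetical alias; assumes this module is exposed as
# ``cauldron.display``). The @ characters are replaced with backslashes before
# rendering, so this displays \int_0^1 x^2 dx = \frac{1}{3}.
#
#     import cauldron as cd
#     cd.display.latex('@int_0^1 x^2 dx = @frac{1}{3}')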
def head(source, count: int = 5):
"""
Displays a specified number of elements in a source object of many
different possible types.
:param source:
DataFrames will show *count* rows of that DataFrame. A list, tuple or
        other iterable will show the first *count* elements. Dictionaries will
show *count* keys from the dictionary, which will be randomly selected
unless you are using an OrderedDict. Strings will show the first
*count* characters.
:param count:
The number of elements to show from the source.
"""
r = _get_report()
r.append_body(render_texts.head(source, count=count))
r.stdout_interceptor.write_source('[ADDED] Head\n')
def tail(source, count: int = 5):
"""
The opposite of the head function. Displays the last *count* elements of
the *source* object.
:param source:
DataFrames will show the last *count* rows of that DataFrame. A list,
        tuple or other iterable will show the last *count* elements. Dictionaries
will show *count* keys from the dictionary, which will be randomly
selected unless you are using an OrderedDict. Strings will show the
last *count* characters.
:param count:
The number of elements to show from the source.
"""
r = _get_report()
r.append_body(render_texts.tail(source, count=count))
r.stdout_interceptor.write_source('[ADDED] Tail\n')
def status(
message: str = None,
progress: float = None,
section_message: str = None,
section_progress: float = None,
):
"""
Updates the status display, which is only visible while a step is running.
This is useful for providing feedback and information during long-running
steps.
A section progress is also available for cases where long running tasks
consist of multiple tasks and you want to display sub-progress messages
within the context of the larger status.
Note: this is only supported when running in the Cauldron desktop
application.
:param message:
The status message you want to display. If left blank the previously
set status message will be retained. Should you desire to remove an
existing message, specify a blank string for this argument.
:param progress:
A number between zero and one that indicates the overall progress for
the current status. If no value is specified, the previously assigned
progress will be retained.
:param section_message:
The status message you want to display for a particular task within a
long-running step. If left blank the previously set section message
will be retained. Should you desire to remove an existing message,
specify a blank string for this argument.
:param section_progress:
A number between zero and one that indicates the progress for the
current section status. If no value is specified, the previously
assigned section progress value will be retained.
"""
environ.abort_thread()
r = _get_report()
step = _cd.project.get_internal_project().current_step
changes = 0
has_changed = step.progress_message != message
if message is not None and has_changed:
changes += 1
step.progress_message = message
    has_changed = step.progress != max(0, min(1, progress or 0))
if progress is not None and has_changed:
changes += 1
step.progress = max(0.0, min(1.0, progress))
has_changed = step.sub_progress_message != section_message
if section_message is not None and has_changed:
changes += 1
step.sub_progress_message = section_message
has_changed = step.sub_progress != max(0, min(1, section_progress or 0))
if section_progress is not None and has_changed:
changes += 1
        step.sub_progress = max(0.0, min(1.0, section_progress))
if changes > 0:
# update the timestamp to inform rendering that a status
# has changed and should be re-rendered into the step.
r.update_last_modified()
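# A minimal usage sketch for the status display above (hypothetical work items;
# assumes this module is exposed as ``cauldron.display`` and the step is running
# in the Cauldron desktop application):
#
#     import cauldron as cd
#
#     items = list(range(200))
#     for index, item in enumerate(items):
#         cd.display.status(
#             message='Processing items',
#             progress=(index + 1) / len(items)
#         )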
def code_block(
code: str = None,
path: str = None,
language_id: str = None,
title: str = None,
caption: str = None
):
"""
Adds a block of syntax highlighted code to the display from either
the supplied code argument, or from the code file specified
by the path argument.
:param code:
A string containing the code to be added to the display
:param path:
A path to a file containing code to be added to the display
:param language_id:
The language identifier that indicates what language should
be used by the syntax highlighter. Valid values are any of the
languages supported by the Pygments highlighter.
:param title:
If specified, the code block will include a title bar with the
value of this argument
:param caption:
If specified, the code block will include a caption box below the code
that contains the value of this argument
"""
environ.abort_thread()
r = _get_report()
r.append_body(render.code_block(
block=code,
path=path,
language=language_id,
title=title,
caption=caption
))
r.stdout_interceptor.write_source('{}\n'.format(code))
def elapsed():
"""
Displays the elapsed time since the step started running.
"""
environ.abort_thread()
step = _cd.project.get_internal_project().current_step
r = _get_report()
r.append_body(render.elapsed_time(step.elapsed_time))
result = '[ELAPSED]: {}\n'.format(timedelta(seconds=step.elapsed_time))
r.stdout_interceptor.write_source(result)
| mit |
rwgdrummer/maskgen | maskgen/analytics/dctAnalytic.py | 1 | 17525 | # =============================================================================
# Authors: PAR Government
# Organization: DARPA
#
# Copyright (c) 2016 PAR Government
# All rights reserved.
#
#
# adapted from https://github.com/enmasse/jpeg_read
#==============================================================================
import sys
from math import *
from Tkinter import *
import matplotlib.pyplot as plt
import numpy as np
import logging
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
def memoize (function):
# http://programmingzen.com/2009/05/18/memoization-in-ruby-and-python/
cache = {}
def decorated_function (*args):
try:
return cache[args]
except KeyError:
val = function (*args)
cache[args] = val
return val
return decorated_function
@memoize
def decodeBits (len, val):
""" Calculate the value from the "additional" bits in the huffman data. """
return val if (val & (1 << len - 1)) else val - ((1 << len) - 1)
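# Illustrative values for the sign-extension rule above: a 2-bit "additional"
# value decodes to one of {-3, -2, 2, 3}, matching the JPEG magnitude category.
#
#     decodeBits (2, 0b00)  # -> -3
#     decodeBits (2, 0b01)  # -> -2
#     decodeBits (2, 0b10)  # ->  2
#     decodeBits (2, 0b11)  # ->  3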
def extractCoeffs (data):
dclum = []
dcchr1 = []
dcchr2 = []
aclum = []
acchr1 = []
acchr2 = []
for MCU in data:
lum = MCU[0]
chr1 = MCU[1]
chr2 = MCU[2]
for MCU_component in lum:
if len (MCU_component):
dclum.append (MCU_component[0])
aclum.extend (MCU_component[1:])
for MCU_component in chr1:
if len (MCU_component):
dcchr1.append (MCU_component[0])
acchr1.extend (MCU_component[1:])
for MCU_component in chr2:
if len (MCU_component):
dcchr2.append (MCU_component[0])
acchr2.extend (MCU_component[1:])
return (dclum, dcchr1, dcchr2, aclum, acchr1, acchr2)
def generateHuffmanCodes (huffsize):
""" Calculate the huffman code of each length. """
huffcode = []
k = 0
code = 0
    # Assign consecutive code values within each bit length, then shift left
    # when moving to the next length (canonical Huffman construction).
for i in range (len (huffsize)):
si = huffsize[i]
for k in range (si):
huffcode.append ((i + 1, code))
code += 1
code <<= 1
return huffcode
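# Illustrative trace of the canonical construction above: with counts
# [0, 3, 1, 0, ..., 0] (no 1-bit codes, three 2-bit codes, one 3-bit code) the
# returned (length, code) pairs are:
#
#     generateHuffmanCodes ([0, 3, 1] + [0] * 13)
#     # -> [(2, 0b00), (2, 0b01), (2, 0b10), (3, 0b110)]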
def getBits (num, gen):
""" Get "num" bits from gen. """
out = 0
for i in range (num):
out <<= 1
val = gen.next ()
if val != []:
out += val & 0x01
else:
return []
return out
def mapHuffmanCodes (codes, values):
""" Map the huffman code to the right value. """
out = {}
for i in range (len (codes)):
out[codes[i]] = values[i]
return out
def readAPP (type, file):
""" Read APP marker. """
Lp = readWord (file)
Lp -= 2
# If APP0 try to read the JFIF header
# Not really necessary
if type == 0:
identifier = file.read (5)
Lp -= 5
version = file.read (2)
Lp -= 2
units = ord (file.read (1))
Lp -= 1
Xdensity = ord (file.read (1)) << 8
Xdensity |= ord (file.read (1))
Lp -= 2
Ydensity = ord (file.read (1)) << 8
Ydensity |= ord (file.read (1))
Lp -= 2
file.seek (Lp, 1)
def readByte (file):
""" Read a byte from file. """
return ord (file.read (1))
def readWord (file):
""" Read a 16 bit word from file. """
return ord (file.read (1)) << 8 | ord (file.read (1))
def restoreDC (data):
""" Restore the DC values. They are coded as the difference from the
previous DC value of the same component.
"""
out = []
dc_prev = [0 for x in range (len (data[0]))]
# For each MCU
for mcu in data:
# For each component
for comp_num in range (len (mcu)):
# For each DU
for du in range (len (mcu[comp_num])):
if mcu[comp_num][du]:
mcu[comp_num][du][0] += dc_prev[comp_num]
dc_prev[comp_num] = mcu[comp_num][du][0]
out.append (mcu)
return out
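# Illustrative effect of the DC restoration above: if a single component stores
# the differences 5, -2, 3 across consecutive data units, the restored absolute
# DC values become 5, 3, 6 (each value is the previous DC plus the difference).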
class JPEG_Reader:
""" Class for reading DCT coefficients from JPEG files. """
def __init__ (self):
self.huffman_ac_tables = [{}, {}, {}, {}]
self.huffman_dc_tables = [{}, {}, {}, {}]
self.q_table = [[], [], [], []]
self.XYP = 0, 0, 0
self.component = {}
self.num_components = 0
self.mcus_read = 0
self.dc = []
self.inline_dc = 0
self.bit_stream = []
self.EOI = False
def readDCT_Coeffs (self, filename):
""" Reads and returns DCT coefficients from the supplied JPEG file. """
self.__init__ ()
data = []
with open (filename, "rb") as inputFile:
in_char = inputFile.read (1)
while in_char:
if in_char == chr (0xff):
in_char = inputFile.read (1)
in_num = ord (in_char)
if 0xe0 <= in_num <= 0xef:
readAPP (in_num - 0xe0, inputFile)
elif in_num == 0xdb:
self.__readDQT (inputFile)
elif in_num == 0xdc:
self.__readDNL (inputFile)
elif in_num == 0xc4:
self.__readDHT (inputFile)
elif in_num == 0xc8:
print "JPG"
elif 0xc0 <= in_num <= 0xcf:
self.__readSOF (in_num - 0xc0, inputFile)
elif in_num == 0xda:
self.__readSOS (inputFile)
self.bit_stream = self.__readBit (inputFile)
while not self.EOI:
data.append (self.__readMCU ())
in_char = inputFile.read (1)
return extractCoeffs (data if self.inline_dc else restoreDC (data))
def __readBit (self, file):
""" A generator that reads one bit from file and handles markers and
byte stuffing.
"""
input = file.read (1)
while input and not self.EOI:
if input == chr (0xFF):
cmd = file.read (1)
if cmd:
# Byte stuffing
if cmd == chr (0x00):
input = chr (0xFF)
# End of image marker
elif cmd == chr (0xD9):
self.EOI = True
# Restart markers
elif 0xD0 <= ord (cmd) <= 0xD7 and self.inline_dc:
# Reset dc value
self.dc = [0 for i in range (self.num_components + 1)]
input = file.read (1)
else:
input = file.read (1)
#print "CMD: %x" % ord(cmd)
if not self.EOI:
for i in range (7, -1, -1):
# Output next bit
yield (ord (input) >> i) & 0x01
input = file.read (1)
while True:
yield []
def __readDHT (self, file):
""" Read and compute the huffman tables. """
# Read the marker length
Lh = readWord (file)
Lh -= 2
while Lh > 0:
huffsize = []
huffval = []
T = readByte (file)
Th = T & 0x0F
Tc = (T >> 4) & 0x0F
#print "Lh: %d Th: %d Tc: %d" % (Lh, Th, Tc)
Lh -= 1
# Read how many symbols of each length
# up to 16 bits
for i in range (16):
huffsize.append (readByte (file))
Lh -= 1
# Generate the huffman codes
huffcode = generateHuffmanCodes (huffsize)
#print "Huffcode", huffcode
# Read the values that should be mapped to huffman codes
for i in huffcode:
#print i
try:
huffval.append (readByte (file))
Lh -= 1
except TypeError:
continue
# Generate lookup tables
if Tc == 0:
self.huffman_dc_tables[Th] = mapHuffmanCodes (huffcode, huffval)
else:
self.huffman_ac_tables[Th] = mapHuffmanCodes (huffcode, huffval)
def __readDNL (self, file):
""" Read the DNL marker. Changes the number of lines. """
Ld = readWord (file)
Ld -= 2
NL = readWord (file)
Ld -= 2
X, Y, P = self.XYP
if Y == 0:
self.XYP = X, NL, P
def __readDQT (self, file):
""" Read the quantization table. The table is in zigzag order. """
Lq = readWord (file)
Lq -= 2
while Lq > 0:
table = []
Tq = readByte (file)
Pq = Tq >> 4
Tq &= 0xF
Lq -= 1
if Pq == 0:
for i in range (64):
table.append (readByte (file))
Lq -= 1
else:
for i in range (64):
val = readWord (file)
table.append (val)
Lq -= 2
self.q_table[Tq] = table
def __readDU (self, comp_num):
""" Read one data unit with component index comp_num. """
data = []
comp = self.component[comp_num]
huff_tbl = self.huffman_dc_tables[comp['Td']]
# Fill data with 64 coefficients
while len (data) < 64:
key = 0
for bits in range (1, 17):
key_len = []
key <<= 1
# Get one bit from bit_stream
val = getBits (1, self.bit_stream)
if val == []:
break
key |= val
# If huffman code exists
if huff_tbl.has_key ((bits, key)):
key_len = huff_tbl[(bits, key)]
break
# After getting the DC value switch to the AC table
huff_tbl = self.huffman_ac_tables[comp['Ta']]
if key_len == []:
#print (bits, key, bin(key)), "key not found"
break
# If ZRL fill with 16 zero coefficients
elif key_len == 0xF0:
for i in range (16):
data.append (0)
continue
# If not DC coefficient
if len (data) != 0:
# If End of block
if key_len == 0x00:
# Fill the rest of the DU with zeros
while len (data) < 64:
data.append (0)
break
# The first part of the AC key_len is the number of leading
# zeros
for i in range (key_len >> 4):
if len (data) < 64:
data.append (0)
key_len &= 0x0F
if len (data) >= 64:
break
if key_len != 0:
# The rest of key_len is the number of "additional" bits
val = getBits (key_len, self.bit_stream)
if val == []:
break
# Decode the additional bits
num = decodeBits (key_len, val)
# Experimental, doesn't work right
if len (data) == 0 and self.inline_dc:
# The DC coefficient value is added to the DC value from
# the corresponding DU in the previous MCU
num += self.dc[comp_num]
self.dc[comp_num] = num
data.append (num)
else:
data.append (0)
#if len(data) != 64:
#print "Wrong size", len(data)
return data
def __readMCU (self):
""" Read an MCU. """
        comp_num = range (self.num_components)
        mcu = list (comp_num)
# For each component
for i in comp_num:
comp = self.component[i + 1]
mcu[i] = []
# For each DU
for j in range (comp['H'] * comp['V']):
if not self.EOI:
mcu[i].append (self.__readDU (i + 1))
self.mcus_read += 1
return mcu
def __readSOF (self, type, file):
""" Read the start of frame marker. """
Lf = readWord (file) # Read the marker length
Lf -= 2
P = readByte (file) # Read the sample precision
Lf -= 1
Y = readWord (file) # Read number of lines
Lf -= 2
X = readWord (file) # Read the number of samples per line
Lf -= 2
Nf = readByte (file) # Read number of components
Lf -= 1
self.XYP = X, Y, P
#print self.XYP
while Lf > 0:
C = readByte (file) # Read component identifier
V = readByte (file) # Read sampling factors
Tq = readByte (file)
Lf -= 3
H = V >> 4
V &= 0xF
# Assign horizontal & vertical sampling factors and qtable
self.component[C] = { 'H' : H, 'V' : V, 'Tq' : Tq }
def __readSOS (self, file):
""" Read the start of scan marker. """
Ls = readWord (file)
Ls -= 2
Ns = readByte (file) # Read number of components in scan
Ls -= 1
for i in range (Ns):
Cs = readByte (file) # Read the scan component selector
Ls -= 1
Ta = readByte (file) # Read the huffman table selectors
Ls -= 1
Td = Ta >> 4
Ta &= 0xF
# Assign the DC huffman table
self.component[Cs]['Td'] = Td
# Assign the AC huffman table
self.component[Cs]['Ta'] = Ta
Ss = readByte (file) # Should be zero if baseline DCT
Ls -= 1
Se = readByte (file) # Should be 63 if baseline DCT
Ls -= 1
A = readByte (file) # Should be zero if baseline DCT
Ls -= 1
#print "Ns:%d Ss:%d Se:%d A:%02X" % (Ns, Ss, Se, A)
self.num_components = Ns
self.dc = [0 for i in range (self.num_components + 1)]
def dequantize (self, mcu):
""" Dequantize an MCU. """
out = mcu
# For each coefficient in each DU in each component, multiply by the
# corresponding value in the quantization table.
for c in range (len (out)):
for du in range (len (out[c])):
for i in range (len (out[c][du])):
out[c][du][i] *= self.q_table[self.component[c + 1]['Tq']][i]
return out
def getHist(filename):
try:
import JPEG_MetaInfoPy
hist, lowValue = JPEG_MetaInfoPy.generateHistogram(filename)
return np.asarray(hist),np.asarray(range(lowValue,lowValue+len(hist)+1))
except Exception as ex:
logging.getLogger('maskgen').warn('External JPEG_MetaInfoPy failed: {}'.format(str(ex)))
DC = JPEG_Reader().readDCT_Coeffs(filename)[0]
minDC = min(DC)
maxDC = max(DC)
binCount = maxDC - minDC + 1
return np.histogram (DC, bins=binCount,
range=(minDC, maxDC + 1))
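# A minimal usage sketch for getHist (hypothetical file name):
#
#     hist, bin_edges = getHist('photo.jpg')
#     # hist[i] is roughly the number of luminance DC coefficients whose value
#     # falls between bin_edges[i] and bin_edges[i + 1].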
class JPEG_View:
def appliesTo (self, filename):
return filename.lower ().endswith (('jpg', 'jpeg'))
def draw (self, frame, filename):
fig = plt.figure ();
self._plotHistogram (fig, getHist(filename))
canvas = FigureCanvasTkAgg (fig, frame)
canvas.show ()
canvas.get_tk_widget ().pack (side=BOTTOM, fill=BOTH, expand=True)
def _labelSigma (self, figure, sigma):
""" Add a label of the value of sigma to the histogram plot. """
props = dict (boxstyle='round', facecolor='wheat', alpha=0.5)
figure.text (0.25, 0.85, '$\sigma=%.2f$' % (sigma),
fontsize=14, verticalalignment='top', bbox=props)
class DCTView (JPEG_View):
def screenName (self):
return 'JPG DCT Histogram'
def _plotHistogram (self, figure, histogram):
ordinates, abscissae = histogram
plt.bar (abscissae[:-1], ordinates, 1);
self._labelSigma (figure, ordinates.std ())
class FFT_DCTView (JPEG_View):
def screenName (self):
return 'FFT(JPG DCT Histogram)'
def _plotHistogram (self, figure, histogram):
# Calculate the DFT of the zero-meaned histogram values. The n/2+1
# positive frequencies are returned by rfft. Mirror the result back
# into ordinates.
#
mean = histogram[0].mean ()
posFreqs = abs (np.fft.rfft ([i - mean for i in histogram[0]]))
ordinates = list (reversed (posFreqs))
ordinates.extend (posFreqs[1:])
n = len (posFreqs)
abscissae = range (1 - n, n)
plt.plot (abscissae, ordinates, 'k')
plt.plot (abscissae, self.__hat (ordinates), 'r')
self._labelSigma (figure, np.std (ordinates))
def __hat (self, data):
length = len (data)
intercept1 = int (length * 0.425)
intercept2 = int (length * 0.575)
amp = max (data)
threshold = amp * 0.15
arr = np.full (length, threshold)
arr[intercept1:intercept2] = amp
return arr
if __name__ == "__main__":
DCTView ().draw (None, sys.argv[1])
FFT_DCTView ().draw (None, sys.argv[1]) | bsd-3-clause |
DataCanvasIO/example-modules | modules/modeling/basic/linear_svc_estimator/main.py | 2 | 1630 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from specparser import get_settings_from_file
from pprint import pprint
import csv
from sklearn.svm import LinearSVC
import numpy as np
from sklearn.externals import joblib
import matplotlib
matplotlib.use('Agg')
import datetime
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
def drawPrecisionRecall(X,Y,output_file):
pdf = PdfPages(output_file)
plt.figure(figsize=(len(Y), len(X)))
plt.plot(Y, X, 'r-o')
plt.title('Precision/Recall')
pdf.savefig() # saves the current figure into a pdf page
plt.close()
pdf.close()
def readcolumn(filename):
column = []
with open(filename,"r") as fconcl:
for line in fconcl:
column.append(line.rstrip('\n'))
return column
def main():
settings = get_settings_from_file("spec.json")
print(settings)
X = np.genfromtxt(settings.Input.X, delimiter=',', skip_header=1)
svc = joblib.load(settings.Input.MODEL)
Y_out = svc.predict(X)
Y_list = [Y_out]
np.savetxt("./conclusion.csv", Y_out, fmt="%d", delimiter=",")
conclusion = readcolumn("./conclusion.csv")
label = readcolumn(settings.Input.Y)
precision_list = []
recall_list = []
hits = 0
for i in range(len(label)):
if conclusion[i] == label[i]:
hits+=1
precision_list.append(1.0*hits/(i+1))
recall_list.append(1.0*hits/(len(label)))
drawPrecisionRecall(precision_list,recall_list,settings.Output.report)
print("Done")
if __name__ == "__main__":
main()
| bsd-3-clause |
bjlittle/iris | docs/gallery_code/oceanography/plot_atlantic_profiles.py | 2 | 3317 | """
Oceanographic Profiles and T-S Diagrams
=======================================
This example demonstrates how to plot vertical profiles of different
variables in the same axes, and how to make a scatter plot of two
variables. There is an oceanographic theme but the same techniques are
equally applicable to atmospheric or other kinds of data.
The data used are profiles of potential temperature and salinity in the
Equatorial and South Atlantic, output from an ocean model.
The y-axis of the first plot produced will be automatically inverted due to the
presence of the attribute positive=down on the depth coordinate. This means
depth values intuitively increase downward on the y-axis.
"""
import matplotlib.pyplot as plt
import iris
import iris.iterate
import iris.plot as iplt
def main():
# Load the gridded temperature and salinity data.
fname = iris.sample_data_path("atlantic_profiles.nc")
cubes = iris.load(fname)
(theta,) = cubes.extract("sea_water_potential_temperature")
(salinity,) = cubes.extract("sea_water_practical_salinity")
# Extract profiles of temperature and salinity from a particular point in
# the southern portion of the domain, and limit the depth of the profile
# to 1000m.
lon_cons = iris.Constraint(longitude=330.5)
lat_cons = iris.Constraint(latitude=lambda l: -10 < l < -9)
depth_cons = iris.Constraint(depth=lambda d: d <= 1000)
theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons)
salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons)
# Plot these profiles on the same set of axes. Depth is automatically
# recognised as a vertical coordinate and placed on the y-axis.
# The first plot is in the default axes. We'll use the same color for the
# curve and its axes/tick labels.
plt.figure(figsize=(5, 6))
temperature_color = (0.3, 0.4, 0.5)
ax1 = plt.gca()
iplt.plot(
theta_1000m,
linewidth=2,
color=temperature_color,
alpha=0.75,
)
ax1.set_xlabel("Potential Temperature / K", color=temperature_color)
ax1.set_ylabel("Depth / m")
for ticklabel in ax1.get_xticklabels():
ticklabel.set_color(temperature_color)
# To plot salinity in the same axes we use twiny(). We'll use a different
# color to identify salinity.
salinity_color = (0.6, 0.1, 0.15)
ax2 = plt.gca().twiny()
iplt.plot(
salinity_1000m,
linewidth=2,
color=salinity_color,
alpha=0.75,
)
ax2.set_xlabel("Salinity / PSU", color=salinity_color)
for ticklabel in ax2.get_xticklabels():
ticklabel.set_color(salinity_color)
plt.tight_layout()
iplt.show()
# Now plot a T-S diagram using scatter. We'll use all the profiles here,
# and each point will be coloured according to its depth.
plt.figure(figsize=(6, 6))
depth_values = theta.coord("depth").points
for s, t in iris.iterate.izip(salinity, theta, coords="depth"):
iplt.scatter(s, t, c=depth_values, marker="+", cmap="RdYlBu_r")
ax = plt.gca()
ax.set_xlabel("Salinity / PSU")
ax.set_ylabel("Potential Temperature / K")
cb = plt.colorbar(orientation="horizontal")
cb.set_label("Depth / m")
plt.tight_layout()
iplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
janmtl/pypsych | tests/data/generators/eprime.py | 1 | 2106 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script for generating mock EPrime test data
"""
import pandas as pd
import numpy as np
import io
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from pypsych.config import Config
def generate_mock_eprime_data(config_path, task_name, begaze_data, sched_path):
"""Generate mock eprime data based on mock begaze data."""
superconfig = Config(path=config_path)
superconfig.load()
config = superconfig.get_subconfig(task_name, 'EPrime')
bg = begaze_data['merged_labels'][['Condition', 'ID']]
ed = np.random.randint(0, 10, (bg.shape[0], len(config['channels'])))
ep = pd.DataFrame(data=ed, index=bg.index, columns=config['channels'])
df = pd.concat([bg, ep], axis=1, join='inner')
df.rename(columns={'ID': 'Img'}, inplace=True)
result = []
for _, row in df.iterrows():
props = ["\t" + str(idx) + ': ' + str(val)
for idx, val in zip(list(row.index), list(row))]
result.append("\n\n".join(props))
result = ('\n\n\t*** LogFrame End ***\n\n'
'\tLevel: 2\n\n'
'\t*** LogFrame Start ***\n\n').join(result)
prestring = ('*** Header Start ***\n\n'
'GARBAGE\n\n'
'*** Header End ***\n\n'
'\tLevel: 2\n\n'
'\t*** LogFrame Start ***\n\n')
result = prestring + result + '\n\n\t*** LogFrame End ***'
return {'df': df, 'raw': result}
def save_mock_eprime_data(output_path, data, subject_id, task_order, task_name):
"""Save the mock eprime files to output_path."""
base_path = ''.join([output_path,
task_name,
'_',
str(subject_id),
str(task_order)])
raw_path = ''.join([base_path, '_eprime.txt'])
df_path = ''.join([base_path, '_eprime_df.txt'])
with io.open(raw_path, 'w', encoding="utf-16") as f:
f.write(unicode(data['raw']))
data['df'].to_csv(df_path, sep="\t")
pass
| bsd-3-clause |
AlexanderFabisch/scikit-learn | sklearn/metrics/pairwise.py | 9 | 45248 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
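# A quick numeric check of the expansion used above (illustrative only, not
# part of the public API):
#
#     import numpy as np
#     x, y = np.array([1., 2.]), np.array([3., 4.])
#     np.sqrt(((x - y) ** 2).sum())                    # 2.828427...
#     np.sqrt(x.dot(x) - 2 * x.dot(y) + y.dot(y))      # 2.828427... (same value)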
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to the half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
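# A quick numeric check of the note above (illustrative only): for the unit
# vectors u = [1, 0] and v = [1, 1] / sqrt(2), the cosine similarity is
# 1 / sqrt(2) ~= 0.7071, so the cosine distance is ~0.2929, and
# 0.5 * ||u - v||^2 = 0.5 * ((1 - 0.7071)**2 + 0.7071**2) ~= 0.2929 as well.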
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
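# A small worked value for the kernel above (illustrative only): with
# X = [[0, 0]] and Y = [[1, 1]], the default gamma is 1 / n_features = 0.5, so
# K = exp(-0.5 * ||x - y||^2) = exp(-0.5 * 2) = exp(-1) ~= 0.3679.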
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
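# Editor's illustrative sketch: the Laplacian kernel uses the L1 distance, so
# for rows differing by (1, 2) the off-diagonal entry is exp(-gamma * 3).
# >>> import numpy as np
# >>> X = np.array([[0., 0.], [1., 2.]])
# >>> K = laplacian_kernel(X, gamma=0.1)
# >>> np.allclose(K[0, 1], np.exp(-0.3))
# True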
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter *dense_output* for sparse output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
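# Editor's illustrative sketch: cosine_similarity of [1, 0] and [2, 2] is
# 1 / sqrt(2), independent of the vectors' norms.
# >>> import numpy as np
# >>> X = np.array([[1., 0.], [2., 2.]])
# >>> K = cosine_similarity(X)
# >>> np.allclose(K[0, 1], 1.0 / np.sqrt(2.0))
# True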
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
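# Editor's illustrative sketch: for the two L1-normalized histograms below,
# Sum [(x - y)^2 / (x + y)] = 0.25/1.5 + 0.25/0.5, so with gamma=1 the
# off-diagonal entry is exp(-(1/6 + 1/2)).
# >>> import numpy as np
# >>> X = np.array([[0.5, 0.5], [1.0, 0.0]])
# >>> K = chi2_kernel(X, gamma=1.0)
# >>> np.allclose(K[0, 1], np.exp(-(0.25 / 1.5 + 0.25 / 0.5)))
# True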
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
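# Editor's illustrative sketch: pairwise_distances with a built-in metric and
# with a user-supplied callable (the callable receives one row from X and one
# row from Y and must return a scalar).
# >>> import numpy as np
# >>> X = np.array([[0., 0.], [3., 4.]])
# >>> pairwise_distances(X, metric='euclidean')[0, 1]
# 5.0
# >>> pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())[0, 1]
# 7.0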
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel functions.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernels, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
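# Editor's illustrative sketch: pairwise_kernels dispatches on the metric name,
# so metric='rbf' with gamma forwarded as a keyword matches rbf_kernel directly;
# filter_params=True silently drops keywords the chosen kernel does not accept.
# >>> import numpy as np
# >>> X = np.array([[0., 1.], [1., 1.]])
# >>> np.allclose(pairwise_kernels(X, metric='rbf', gamma=1.0),
# ...             rbf_kernel(X, gamma=1.0))
# True
# >>> K = pairwise_kernels(X, metric='linear', gamma=1.0, filter_params=True)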
| bsd-3-clause |
dingocuster/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
nguy/brawl4d | LMA/controller.py | 1 | 10240 | """ Support for LMA data display in brawl4d.
These are meant to be lightweight wrappers to coordinate data formats
understood by the lmatools package.
"""
import time
import numpy as np
from lmatools.flashsort.autosort.LMAarrayFile import LMAdataFile
from stormdrain.bounds import Bounds, BoundsFilter
from stormdrain.data import NamedArrayDataset, indexed
from stormdrain.pipeline import Branchpoint, coroutine, ItemModifier
from stormdrain.support.matplotlib.artistupdaters import PanelsScatterController
from stormdrain.support.matplotlib.poly_lasso import LassoPayloadController
class LMAAnimator(object):
def __init__(self, duration, variable='time'):
self.tstart = time.time()
self.duration = duration
def draw_frame(self, animator, time_fraction):
pass
def init_draw(self, animator):
pass
class LMAController(object):
""" Manages bounds object with LMA-specific criteria. Convenience functions for loading LMA data.
"""
z_alt_mapping = {'z':('alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) ) }
def __init__(self, *args, **kwargs):
super(LMAController, self).__init__(*args, **kwargs)
self.bounds = Bounds(chi2=(0.0, 1.0), stations=(6, 99))
self.default_color_bounds = Bounds(parent=self.bounds, charge=(-1,1))
self.datasets = set()
self.flash_datasets = set()
def pipeline_for_dataset(self, d, panels,
names4d=('lon', 'lat', 'alt', 'time'),
transform_mapping=None,
scatter_kwargs = {}
):
""" Set 4d_names to the spatial coordinate names in d that provide
longitude, latitude, altitude, and time. Default of
lon, lat, alt, and time which are assumed to be in deg, deg, meters, seconds
entries in the scatter_kwargs dictionary are passed as kwargs to the matplotlib
scatter call.
"""
# Set up dataset -> time-height bound filter -> brancher
branch = Branchpoint([])
brancher = branch.broadcast()
# strictly speaking, z in the map projection and MSL alt aren't the same - z is somewhat distorted by the projection.
# therefore, add some padding. filtered again later after projection.
quality_filter = BoundsFilter(target=brancher, bounds=self.bounds).filter()
if transform_mapping is None:
transform_mapping = self.z_alt_mapping
# Use 'time', which is the name in panels.bounds, and not names4d[3], which
# is linked to 'time' by transform_mapping if necessary
bound_filter = BoundsFilter(target=quality_filter, bounds=panels.bounds,
restrict_to=('time'), transform_mapping=transform_mapping)
filterer = bound_filter.filter()
d.target = filterer
# Set up brancher -> coordinate transform -> final_filter -> multi-axis scatter updater
scatter_ctrl = PanelsScatterController(
panels=panels,
color_field=names4d[3],
default_color_bounds=self.default_color_bounds,
**scatter_kwargs)
scatter_outlet_broadcaster = scatter_ctrl.branchpoint
scatter_updater = scatter_outlet_broadcaster.broadcast()
final_bound_filter = BoundsFilter(target=scatter_updater, bounds=panels.bounds)
final_filterer = final_bound_filter.filter()
cs_transformer = panels.cs.project_points(
target=final_filterer,
x_coord='x', y_coord='y', z_coord='z',
lat_coord=names4d[1], lon_coord=names4d[0], alt_coord=names4d[2],
distance_scale_factor=1.0e-3)
branch.targets.add(cs_transformer)
# return each broadcaster so that other things can tap into results of transformation of this dataset
return branch, scatter_ctrl
@coroutine
def flash_stat_printer(self, min_points=10):
while True:
ev, fl = (yield)
template = "{0} of {1} flashes have > {3} points. Their average area = {2:5.1f} km^2"
N = len(fl)
good = (fl['n_points'] >= min_points)
N_good = len(fl[good])
area = np.mean(fl['area'][good])
print template.format(N_good, N, area, min_points)
def flash_stats_for_dataset(self, d, selection_broadcaster):
flash_stat_branchpoint = Branchpoint([self.flash_stat_printer()])
flash_stat_brancher = flash_stat_branchpoint.broadcast()
@coroutine
def flash_data_for_selection(target, flash_id_key = 'flash_id'):
""" Accepts an array of event data from the pipeline, and sends
event and flash data.
"""
while True:
ev = (yield) # array of event data
fl_dat = d.flash_data
flash_ids = set(ev[flash_id_key])
flashes = np.fromiter(
(fl for fl in fl_dat if fl[flash_id_key] in flash_ids),
dtype=fl_dat.dtype)
target.send((ev, flashes))
selection_broadcaster.targets.add(flash_data_for_selection(flash_stat_brancher))
return flash_stat_branchpoint
@indexed()
def read_dat(self, *args, **kwargs):
""" All args and kwargs are passed to the LMAdataFile object from lmatools"""
lma = LMAdataFile(*args, **kwargs)
stn = lma.stations # adds stations to lma.data as a side-effect
d = NamedArrayDataset(lma.data)
self.datasets.add(d)
return d
def load_dat_to_panels(self, panels, *args, **kwargs):
""" All args and kwargs are passed to the LMAdataFile object from lmatools"""
d = self.read_dat(*args, **kwargs)
post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels)
branch_to_scatter_artists = scatter_ctrl.branchpoint
# ask for a copy of the array from each selection operation, so that
# it's saved and ready for any lasso operations
charge_lasso = LassoChargeController(
target=ItemModifier(
target=d.update(field_names=['charge']),
item_name='charge').modify())
branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())
return d, post_filter_brancher, scatter_ctrl, charge_lasso
@indexed(index_name='hdf_row_idx')
def read_hdf5(self, LMAfileHDF):
try:
import tables
except ImportError:
print "couldn't import pytables"
return None
from hdf5_lma import HDF5Dataset
# get the HDF5 table name
LMAh5 = tables.openFile(LMAfileHDF, 'r')
table_names = LMAh5.root.events._v_children.keys()
table_path = '/events/' + table_names[0]
LMAh5.close()
d = HDF5Dataset(LMAfileHDF, table_path=table_path, mode='a')
self.datasets.add(d)
if d.flash_table is not None:
print "found flash data"
return d
def load_hdf5_to_panels(self, panels, LMAfileHDF, scatter_kwargs={}):
d = self.read_hdf5(LMAfileHDF)
post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels, scatter_kwargs=scatter_kwargs)
branch_to_scatter_artists = scatter_ctrl.branchpoint
charge_lasso = LassoChargeController(
target=ItemModifier(
target=d.update(index_name='hdf_row_idx',
field_names=['charge']),
item_name='charge').modify())
branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())
return d, post_filter_brancher, scatter_ctrl, charge_lasso
def load_hdf5_flashes_to_panels(self, panels, hdf5dataset, min_points=10):
""" Set up a flash dataset display. The sole argument is usually the HDF5
LMA dataset returned by a call to self.load_hdf5_to_panels """
from hdf5_lma import HDF5FlashDataset
if hdf5dataset.flash_table is not None:
point_count_dtype = hdf5dataset.flash_data['n_points'].dtype
self.bounds.n_points = (min_points, np.iinfo(point_count_dtype))
flash_d = HDF5FlashDataset(hdf5dataset)
transform_mapping = {}
transform_mapping['time'] = ('start', (lambda v: (v[0], v[1])) )
transform_mapping['lat'] = ('init_lat', (lambda v: (v[0], v[1])) )
transform_mapping['lon'] = ('init_lon', (lambda v: (v[0], v[1])) )
transform_mapping['z'] = ('init_alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) )
flash_post_filter_brancher, flash_scatter_ctrl = self.pipeline_for_dataset(flash_d, panels,
transform_mapping=transform_mapping,
names4d=('init_lon', 'init_lat', 'init_alt', 'start') )
for art in flash_scatter_ctrl.artist_outlet_controllers:
# there is no time variable, but the artist updater is set to expect
# time. Patch that up.
if art.coords == ('time', 'z'):
art.coords = ('start', 'z')
# Draw flash markers in a different style
art.artist.set_edgecolor('k')
self.flash_datasets.add(flash_d)
return flash_d, flash_post_filter_brancher, flash_scatter_ctrl
class LassoChargeController(LassoPayloadController):
""" The "charge" attribute is one of {-1, 0, 1} to set
negative, unclassified, or positive charge, or None
to do nothing.
"""
charge = LassoPayloadController.Payload() | bsd-2-clause |
ashokpant/clandmark | python_interface/bin/flandmark_demo.py | 6 | 2152 | import numpy as np
import os
from fnmatch import fnmatch
from py_flandmark import PyFlandmark
from PIL import Image
import ImageDraw
import matplotlib.pyplot as plt
def rgb2gray(rgb):
"""
Converts an RGB array to its grayscale variant according to the
ITU-R BT.601 luma formula (a helper that is missing in NumPy/PIL here).
"""
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def read_bbox_from_txt(file_name):
"""
Returns a 2x2 matrix whose columns hold the upper-left and
lower-right corners of the rectangle that contains the face.
"""
f = open(file_name)
txt = f.read().replace(',', ' ')
f.close()
ret = np.array(map(int, txt.split()), dtype=np.int32)
ret = ret.reshape((2,2), order='F')
return ret
DIR = '../../../data/Images/'
JPGS = [f for f in os.listdir(DIR) if fnmatch(f, '*.jpg')]
flmrk = PyFlandmark("../../../data/flandmark_model.xml", False)
for jpg_name in JPGS:
file_name = jpg_name[:-4]
img = Image.open(DIR + jpg_name)
arr = rgb2gray(np.asarray(img))
bbox = read_bbox_from_txt(DIR + jpg_name[:-4] + '.det')
d_landmarks = flmrk.detect(arr, bbox)
n = d_landmarks.shape[1]
print "test detect method"
im = Image.fromarray(arr)
img_dr = ImageDraw.Draw(im)
img_dr.rectangle([tuple(bbox[:,0]), tuple(bbox[:,1])], outline="#FF00FF")
r = 2.
for i in xrange(n):
x = d_landmarks[0,i]
y = d_landmarks[1,i]
img_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test detect method"
frame = flmrk.get_normalized_frame(arr, bbox)[0]
frame = frame.astype(np.double)
im = Image.fromarray(frame)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test detect_base method"
landmarks = flmrk.detect_base(frame)
im = Image.fromarray(frame)
img_dr = ImageDraw.Draw(im)
r = 2.
for i in xrange(n):
x = landmarks[0,i]
y = landmarks[1,i]
img_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test psi method"
psi = flmrk.get_psi(frame, landmarks.astype(np.int32), bbox)
#flmrk.get_psi(d_landmarks, arr, bbox)
break | gpl-3.0 |
abbeymiles/aima-python | submissions/Blue/myNN.py | 10 | 3071 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Blue import music
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
musicATRB = DataFrame()
musicATRB.data = []
targetData = []
'''
Extract data from the CORGIS Music Library.
Most 'hit' songs average 48-52 bars and no more than ~3 minutes (180 seconds)...
'''
allSongs = music.get_songs()
for song in allSongs:
try:
length = float(song['song']["duration"])
targetData.append(length)
genre = song['artist']['terms'] #String
title = song['song']['title'] #String
# release = float(song['song']['Release'])
musicATRB.data.append([genre, title])
except:
traceback.print_exc()
musicATRB.feature_names = [
'Genre',
'Title',
'Release',
'Length',
]
musicATRB.target = []
def musicTarget(length):
# a candidate hit if the song is no longer than 3.5 minutes (210 seconds)
if length <= 210:
return 1
return 0
for i in targetData:
tt = musicTarget(i)
musicATRB.target.append(tt)
musicATRB.target_names = [
'Not a hit song',
'Could be a hit song',
]
Examples = {
'Music': musicATRB,
}
'''
Make a customn classifier,
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (100,),
activation = 'relu',
solver='sgd', # 'adam',
alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
musicScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
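# Editor's illustrative sketch of the min-max scaling above: setupScales
# records per-column minima and maxima, and scaleGrid maps every cell to
# (cell - min) / (max - min). On toy data (illustration only -- calling it
# would overwrite the module-level min/max used below):
# >>> setupScales([[0., 10.], [5., 20.]])
# >>> scaleGrid([[0., 10.], [5., 20.]])
# [[0.0, 0.0], [1.0, 1.0]]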
setupScales(musicATRB.data)
musicScaled.data = scaleGrid(musicATRB.data)
musicScaled.feature_names = musicATRB.feature_names
musicScaled.target = musicATRB.target
musicScaled.target_names = musicATRB.target_names
Examples = {
'musicDefault': {
'frame': musicATRB,
},
'MusicSGD': {
'frame': musicATRB,
'mlpc': mlpc
},
'MusicScaled': {
'frame': musicScaled,
},
} | mit |
jblackburne/scikit-learn | sklearn/manifold/setup.py | 24 | 1279 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
MartinDelzant/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# number of permutations. However, it works with sampling algorithms
# that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
juanshishido/okcupid | utils/permutation.py | 1 | 2439 | import numpy as np
from scipy.stats import ttest_ind
from sklearn.metrics import accuracy_score
def _diff_means(m, arr):
"""Calculate the difference-in-means statistic.
This is based on an input array, `arr`, where the first
`m` observations correspond to a particular class.
Parameters
----------
m : int
Number of samples in the first class
arr : np.ndarray
Data for both classes
Returns
-------
float
"""
return np.mean(arr[:m]) - np.mean(arr[m:])
def _permute(a, b, comparison='predictions', permutations=10000):
"""Estimate of the permutation-based p-value
Parameters
----------
a : np.ndarray
Data for one class or
ground truth (correct) labels
b : np.ndarray
Data for another class or
predicted labels, as returned by a classifier
comparison : str
{'predictions', 'means'}
permutations : int, optional
Number of permutations
Returns
-------
p_value : float
The proportion of times a value as extreme
as the observed estimate is seen
Notes
-----
This calculates the two-tailed p-value
"""
assert comparison in ('predictions', 'means')
np.random.seed(42)
if comparison == 'predictions':
c = b.copy()
compare = accuracy_score
else:
c = np.append(a, b)
a = a.shape[0]
compare = _diff_means
baseline = compare(a, c)
v = []
for _ in range(permutations):
np.random.shuffle(c)
v.append(compare(a, c))
p_value = (np.abs(np.array(v)) >= np.abs(baseline)).sum() / permutations
return p_value
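# Editor's illustrative sketch: a difference-in-means permutation test on two
# samples whose true means differ by 1; the estimated p-value should be ~0.
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> a, b = rng.normal(0., 1., 100), rng.normal(1., 1., 100)
# >>> _permute(a, b, comparison='means', permutations=1000) < 0.05
# True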
def print_pvalues(a, b):
"""Wrapper function for printing meand and p-values
both permutation-based and classical
Parameters
----------
a : np.ndarray
Data for one class or
ground truth (correct) labels
b : np.ndarray
Data for another class or
predicted labels, as returned by a classifier
Returns
-------
None
"""
assert isinstance(a, np.ndarray) and isinstance(b, np.ndarray)
rnd = lambda x: np.round(x, 8)
permutation = _permute(a, b, 'means')
classical = ttest_ind(a, b, equal_var=False)[1]
print("[means] 'a':", rnd(a.mean()), "'b':", rnd(b.mean()))
print("p-values:")
print(" [permutation]:", rnd(permutation))
print(" [classical]: ", rnd(classical))
| mit |
jrbourbeau/cr-composition | processing/legacy/anisotropy/random_trials/process_kstest.py | 2 | 7627 | #!/usr/bin/env python
import os
import argparse
import numpy as np
import pandas as pd
import pycondor
import comptools as comp
if __name__ == "__main__":
p = argparse.ArgumentParser(
description='Builds condor jobs that generate random-split sky maps and save KS-test p-values')
p.add_argument('-c', '--config', dest='config',
default='IC86.2012',
choices=['IC79', 'IC86.2012', 'IC86.2013', 'IC86.2014', 'IC86.2015'],
help='Detector configuration')
p.add_argument('--low_energy', dest='low_energy',
default=False, action='store_true',
help='Only use events with energy < 10**6.75 GeV')
p.add_argument('--n_side', dest='n_side', type=int,
default=64,
help='Number of times to split the DataFrame')
p.add_argument('--chunksize', dest='chunksize', type=int,
default=1000,
help='Number of lines used when reading in DataFrame')
p.add_argument('--n_batches', dest='n_batches', type=int,
default=50,
help='Number of batches running in parallel for each ks-test trial')
p.add_argument('--ks_trials', dest='ks_trials', type=int,
default=100,
help='Number of random maps to generate')
p.add_argument('--overwrite', dest='overwrite',
default=False, action='store_true',
help='Option to overwrite reference map file, '
'if it already exists')
p.add_argument('--test', dest='test',
default=False, action='store_true',
help='Option to run small test version')
args = p.parse_args()
if args.test:
args.ks_trials = 20
args.n_batches = 10000
args.chunksize = 100
# Define output directories
error = comp.paths.condor_data_dir + '/ks_test_{}/error'.format(args.config)
output = comp.paths.condor_data_dir + '/ks_test_{}/output'.format(args.config)
log = comp.paths.condor_scratch_dir + '/ks_test_{}/log'.format(args.config)
submit = comp.paths.condor_scratch_dir + '/ks_test_{}/submit'.format(args.config)
# Define path to executables
make_maps_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'make_maps.py')
merge_maps_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'merge_maps.py')
save_pvals_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'save_pvals.py')
# Create Dagman instance
dag_name = 'anisotropy_kstest_{}'.format(args.config)
if args.test:
dag_name += '_test'
dagman = pycondor.Dagman(dag_name, submit=submit, verbose=1)
# Create Job for saving ks-test p-values for each trial
save_pvals_name = 'save_pvals_{}'.format(args.config)
if args.low_energy:
save_pvals_name += '_lowenergy'
save_pvals_job = pycondor.Job(save_pvals_name, save_pvals_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
save_pvals_infiles_0 = []
save_pvals_infiles_1 = []
dagman.add_job(save_pvals_job)
outdir = os.path.join(comp.paths.comp_data_dir, args.config + '_data',
'anisotropy', 'random_splits')
if args.test:
outdir = os.path.join(outdir, 'test')
for trial_num in range(args.ks_trials):
# Create map_maps jobs for this ks_trial
make_maps_name = 'make_maps_{}_trial-{}'.format(args.config, trial_num)
if args.low_energy:
make_maps_name += '_lowenergy'
make_maps_job = pycondor.Job(make_maps_name, make_maps_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
dagman.add_job(make_maps_job)
merge_maps_infiles_0 = []
merge_maps_infiles_1 = []
for batch_idx in range(args.n_batches):
if args.test and batch_idx > 2:
break
outfile_sample_1 = os.path.join(outdir,
'random_split_1_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))
outfile_sample_0 = os.path.join(outdir,
'random_split_0_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))
make_maps_arg_list = []
make_maps_arg_list.append('--config {}'.format(args.config))
make_maps_arg_list.append('--n_side {}'.format(args.n_side))
make_maps_arg_list.append('--chunksize {}'.format(args.chunksize))
make_maps_arg_list.append('--n_batches {}'.format(args.n_batches))
make_maps_arg_list.append('--batch_idx {}'.format(batch_idx))
make_maps_arg_list.append('--outfile_sample_0 {}'.format(outfile_sample_0))
make_maps_arg_list.append('--outfile_sample_1 {}'.format(outfile_sample_1))
make_maps_arg = ' '.join(make_maps_arg_list)
if args.low_energy:
make_maps_arg += ' --low_energy'
make_maps_job.add_arg(make_maps_arg)
# Add this outfile to the list of infiles for merge_maps_job
merge_maps_infiles_0.append(outfile_sample_0)
merge_maps_infiles_1.append(outfile_sample_1)
for sample_idx, input_file_list in enumerate([merge_maps_infiles_0,
merge_maps_infiles_1]):
merge_maps_name = 'merge_maps_{}_trial-{}_split-{}'.format(args.config, trial_num, sample_idx)
if args.low_energy:
merge_maps_name += '_lowenergy'
merge_maps_job = pycondor.Job(merge_maps_name, merge_maps_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
# Ensure that make_maps_job completes before merge_maps_job begins
make_maps_job.add_child(merge_maps_job)
merge_maps_job.add_child(save_pvals_job)
dagman.add_job(merge_maps_job)
merge_infiles_str = ' '.join(input_file_list)
# Assemble merged output file path
merge_outfile = os.path.join(outdir, 'random_split_{}_trial-{}.fits'.format(sample_idx, trial_num))
merge_maps_arg = '--infiles {} --outfile {}'.format(merge_infiles_str, merge_outfile)
merge_maps_job.add_arg(merge_maps_arg)
if sample_idx == 0:
save_pvals_infiles_0.append(merge_outfile)
else:
save_pvals_infiles_1.append(merge_outfile)
save_pvals_infiles_0_str = ' '.join(save_pvals_infiles_0)
save_pvals_infiles_1_str = ' '.join(save_pvals_infiles_1)
if args.low_energy:
outfile_basename = 'ks_test_dataframe_lowenergy.hdf'
else:
outfile_basename = 'ks_test_dataframe.hdf'
outfile = os.path.join(outdir, outfile_basename)
save_pvals_arg = '--infiles_sample_0 {} --infiles_sample_1 {} ' \
'--outfile {}'.format(save_pvals_infiles_0_str, save_pvals_infiles_1_str, outfile)
save_pvals_job.add_arg(save_pvals_arg)
dagman.build_submit(fancyname=True)
| mit |
bradleyhd/netsim | nodes_vs_routing_speed.py | 1 | 2878 | import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.optimize import curve_fit
def linear(x, a, b):
return a * x + b
def quadratic(x, a, b, c):
return a * x**2 + b * x + c
def exponential(x, a, b, c):
return a * x**b + c
fig = plt.figure(num=None, figsize=(12, 8), dpi=300, facecolor='k', edgecolor='k')
xs = [[1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292]]
ys = [[0.00013309850001519408, 0.00059208550001699223, 0.002604027000003839, 0.004665461000030291, 0.014662985999962075, 0.023410306499954459, 0.041176939000251878], [0.00014861549998101964, 0.00055641999999522795, 0.002577900000005684, 0.0054275369999459144, 0.021226498000032734, 0.029786237500047719, 0.059782716000881919], [0.00012334000000180367, 0.00043368899999052246, 0.0020054734999632728, 0.005848614000001362, 0.014609930999995413, 0.019599954500336025, 0.028973604500606598], [0.00012613299999486571, 0.00044437049999146438, 0.0021501399999692694, 0.0055929929999933847, 0.019908546500118973, 0.039582631500252319, 0.054390303499531001]]
ys = np.array(ys) * 1000
def graph(i, label, color, marker, l_marker):
y = np.array(ys[i])
x = np.array(xs[i])
xl = np.linspace(np.min(x), np.max(x), 500)
popt, pcov = curve_fit(exponential, x, y)
plt.scatter(x, y, label=label, color=color, marker=marker)
plt.plot(xl, exponential(xl, *popt), color=color, linestyle=l_marker)
blue = '#5738FF'
purple = '#E747E7'
orange = '#E7A725'
green = '#A1FF47'
red = '#FF1E43'
gray = '#333333'
white = 'w'
graph(0, 'EDS5 - original graph', red, 'o', '--')
graph(1, 'N5 - original graph', purple, 's', '--')
graph(2, 'EDS5 - decision graph', blue, '^', '--')
graph(3, 'N5 - decision graph', white, 'D', '--')
ax = fig.gca()
plt.title('Effects of Node Ordering on Routing Speed', color=white)
plt.xlabel('Effective $\\vert V\/\\vert$')
plt.ylabel('Routing Time (ms)')
plt.axes().set_axis_bgcolor('black')
ax.xaxis.label.set_color(white)
ax.yaxis.label.set_color(white)
ax.tick_params(axis='x', colors=white)
ax.tick_params(axis='y', colors=white)
ax.spines['bottom'].set_color(white)
ax.spines['top'].set_color(white)
ax.spines['left'].set_color(white)
ax.spines['right'].set_color(white)
legend = plt.legend(loc=0, numpoints=1, framealpha=0.0)
legend.get_frame().set_facecolor('k')
max_x = np.max(np.array(xs))
max_y = np.max(np.array(ys))
min_x = np.min(np.array(xs))
min_y = 0 - (max_y * 0.01)
min_x = 0 - (max_x * 0.01)
max_x *= 1.01
max_y *= 1.01
plt.axes().set_xlim([min_x, max_x])
plt.axes().set_ylim([min_y, max_y])
for text in legend.get_texts():
text.set_color(white)
# plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.savefig('nodes_vs_routing_speed.png', transparent=True)
#plt.show() | gpl-3.0 |
esc/dask | dask/dataframe/shuffle.py | 4 | 2967 | from itertools import count
from collections import Iterator
from math import ceil
from toolz import merge, accumulate, merge_sorted
import toolz
from operator import getitem, setitem
import pandas as pd
import numpy as np
from pframe import pframe
from .. import threaded
from .core import DataFrame, Series, get, names
from ..compatibility import unicode
from ..utils import ignoring
tokens = ('-%d' % i for i in count(1))
def set_index(f, index, npartitions=None, **kwargs):
""" Set DataFrame index to new column
Sorts index and realigns Dataframe to new sorted order. This shuffles and
repartitions your data.
"""
npartitions = npartitions or f.npartitions
if not isinstance(index, Series):
index2 = f[index]
else:
index2 = index
divisions = (index2
.quantiles(np.linspace(0, 100, npartitions+1)[1:-1])
.compute())
return f.set_partition(index, divisions, **kwargs)
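# Editor's illustrative sketch (hypothetical `df` with a 'timestamp' column;
# this mirrors the early dask API in this module rather than modern dask):
# >>> df2 = set_index(df, 'timestamp', npartitions=8)
# The new divisions are quantiles of 'timestamp', so later index-aligned
# operations can avoid a full shuffle.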
partition_names = ('set_partition-%d' % i for i in count(1))
def set_partition(f, index, divisions, get=threaded.get, **kwargs):
""" Set new partitioning along index given divisions """
divisions = unique(divisions)
name = next(names)
if isinstance(index, Series):
assert index.divisions == f.divisions
dsk = dict(((name, i), (f._partition_type.set_index, block, ind))
for i, (block, ind) in enumerate(zip(f._keys(), index._keys())))
f2 = type(f)(merge(f.dask, index.dask, dsk), name,
f.column_info, f.divisions)
else:
dsk = dict(((name, i), (f._partition_type.set_index, block, index))
for i, block in enumerate(f._keys()))
f2 = type(f)(merge(f.dask, dsk), name, f.column_info, f.divisions)
head = f2.head()
pf = pframe(like=head, divisions=divisions, **kwargs)
def append(block):
pf.append(block)
return 0
f2.map_blocks(append).compute(get=get)
pf.flush()
return from_pframe(pf)
def from_pframe(pf):
""" Load dask.array from pframe """
name = next(names)
dsk = dict(((name, i), (pframe.get_partition, pf, i))
for i in range(pf.npartitions))
return DataFrame(dsk, name, pf.columns, pf.divisions)
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories, divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
| bsd-3-clause |
q1ang/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_toolkits/axes_grid/examples/demo_parasite_axes2.py | 16 | 1208 | from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
if 1:
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlim(0, 2)
host.set_ylim(0, 2)
host.set_xlabel("Distance")
host.set_ylabel("Density")
par1.set_ylabel("Temperature")
par2.set_ylabel("Velocity")
p1, = host.plot([0, 1, 2], [0, 1, 2], label="Density")
p2, = par1.plot([0, 1, 2], [0, 3, 2], label="Temperature")
p3, = par2.plot([0, 1, 2], [50, 30, 15], label="Velocity")
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
par2.axis["right"].label.set_color(p3.get_color())
plt.draw()
plt.show()
#plt.savefig("Test")
| mit |
woozzu/pylearn2 | pylearn2/scripts/tests/test_print_monitor_cv.py | 48 | 1927 | """
Test print_monitor_cv.py by training on a short TrainCV YAML file and
analyzing the output pickle.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.scripts import print_monitor_cv
from pylearn2.testing.skip import skip_if_no_sklearn
def test_print_monitor_cv():
"""Test print_monitor_cv.py."""
skip_if_no_sklearn()
handle, filename = tempfile.mkstemp()
trainer = yaml_parse.load(test_print_monitor_cv_yaml %
{'filename': filename})
trainer.main_loop()
# run print_monitor_cv.py main
print_monitor_cv.main(filename)
# run print_monitor_cv.py main with all=True
print_monitor_cv.main(filename, all=True)
# cleanup
os.remove(filename)
test_print_monitor_cv_yaml = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 8,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 2,
irange: 0.05,
},
],
nvis: 10,
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 5,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
save_path: %(filename)s,
}
"""
| bsd-3-clause |
vortex-ape/scikit-learn | examples/bicluster/plot_bicluster_newsgroups.py | 39 | 5911 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
"""
from __future__ import print_function
from collections import defaultdict
import operator
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
print(__doc__)
def number_normalizer(tokens):
""" Map all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
return ("#NUMBER" if token[0].isdigit() else token for token in tokens)
class NumberNormalizingVectorizer(TfidfVectorizer):
def build_tokenizer(self):
tokenize = super(NumberNormalizingVectorizer, self).build_tokenizer()
return lambda doc: list(number_normalizer(tokenize(doc)))
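# A small illustration of the tokenizer wrapper above (the sample sentence
# is made up; the output assumes scikit-learn's default token pattern):
#
#     >>> vec = NumberNormalizingVectorizer()                   # doctest: +SKIP
#     >>> vec.build_tokenizer()("released in 1999 and 2004")    # doctest: +SKIP
#     ['released', 'in', '#NUMBER', 'and', '#NUMBER']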
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = NumberNormalizingVectorizer(stop_words='english', min_df=5)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis],
# cols].sum() but much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
fmacias64/spyre | setup.py | 3 | 1217 | from setuptools import setup, find_packages
setup(
name='DataSpyre',
version='0.2.0',
description='Spyre makes it easy to build interactive web applications, and requires no knowledge of HTML, CSS, or Javascript.',
url='https://github.com/adamhajari/spyre',
author='Adam Hajari',
author_email='adam@nextbigsound.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: CherryPy',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Environment :: Web Environment',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
keywords='web application template data visualization',
include_package_data = True, # include everything in source control
packages = ['spyre'], # include all packages under src
package_data = {
'': ['*.js','*.css','*.html'],
'public': ['js/*.js','css/*.css'],
},
install_requires=[
"numpy",
"pandas",
"cherrypy",
"jinja2",
"matplotlib",
]
)
| mit |
davidgardenier/frbpoppy | tests/lognlogs/local.py | 1 | 1611 | """Check the log N log F slope of a local population."""
import numpy as np
import matplotlib.pyplot as plt
from frbpoppy import CosmicPopulation, Survey, SurveyPopulation
from frbpoppy.population import unpickle
from tests.convenience import plot_aa_style, rel_path
MAKE = True
if MAKE:
population = CosmicPopulation.simple(1e5, generate=True)
survey = Survey('perfect')
surv_pop = SurveyPopulation(population, survey)
surv_pop.name = 'lognlogflocal'
surv_pop.save()
else:
surv_pop = unpickle('lognlogflocal')
# Get parameter
parms = surv_pop.frbs.fluence
min_p = min(parms)
max_p = max(parms)
# Bin up
min_f = np.log10(min(parms))
max_f = np.log10(max(parms))
log_bins = np.logspace(min_f, max_f, 50)
hist, edges = np.histogram(parms, bins=log_bins)
n_gt_s = np.cumsum(hist[::-1])[::-1]
# Calculate alpha
alpha, alpha_err, norm = surv_pop.frbs.calc_logn_logs(parameter='fluence',
min_p=min_p,
max_p=max_p)
print(alpha, alpha_err, norm)
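# For a homogeneous local (Euclidean) population the cumulative source
# counts are expected to scale as N(>F) ~ F^(-3/2), so alpha should come
# out close to -1.5; the fitted power law below is plotted against the
# binned counts as a sanity check.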
xs = 10**((np.log10(edges[:-1]) + np.log10(edges[1:])) / 2)
xs = xs[xs >= min_p]
xs = xs[xs <= max_p]
ys = [norm*x**(alpha) for x in xs]
plot_aa_style()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.step(edges[:-1], n_gt_s, where='post')
plt.plot(xs, ys, linestyle='--',
label=rf'$\alpha$ = {alpha:.3} $\pm$ {round(abs(alpha_err), 2)}')
plt.xlabel('Fluence (Jy ms)')
plt.ylabel(r'N(${>}Fluence$)')
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.tight_layout()
plt.savefig(rel_path('plots/logn_logf_local.pdf'))
| mit |
wooga/airflow | tests/providers/presto/hooks/test_presto.py | 5 | 4331 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from unittest.mock import patch
from prestodb.transaction import IsolationLevel
from airflow.models import Connection
from airflow.providers.presto.hooks.presto import PrestoHook
class TestPrestoHookConn(unittest.TestCase):
def setUp(self):
super().setUp()
self.connection = Connection(
login='login',
password='password',
host='host',
schema='hive',
)
class UnitTestPrestoHook(PrestoHook):
conn_name_attr = 'presto_conn_id'
self.db_hook = UnitTestPrestoHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@patch('airflow.providers.presto.hooks.presto.prestodb.auth.BasicAuthentication')
@patch('airflow.providers.presto.hooks.presto.prestodb.dbapi.connect')
def test_get_conn(self, mock_connect, mock_basic_auth):
self.db_hook.get_conn()
mock_connect.assert_called_once_with(catalog='hive', host='host', port=None, http_scheme='http',
schema='hive', source='airflow', user='login', isolation_level=0,
auth=mock_basic_auth('login', 'password'))
class TestPrestoHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.cur = mock.MagicMock()
self.conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
conn = self.conn
class UnitTestPrestoHook(PrestoHook):
conn_name_attr = 'test_conn_id'
def get_conn(self):
return conn
def get_isolation_level(self):
return IsolationLevel.READ_COMMITTED
self.db_hook = UnitTestPrestoHook()
@patch('airflow.hooks.dbapi_hook.DbApiHook.insert_rows')
def test_insert_rows(self, mock_insert_rows):
table = "table"
rows = [("hello",),
("world",)]
target_fields = None
commit_every = 10
self.db_hook.insert_rows(table, rows, target_fields, commit_every)
mock_insert_rows.assert_called_once_with(table, rows, None, 10)
def test_get_first_record(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchone.return_value = result_sets[0]
self.assertEqual(result_sets[0], self.db_hook.get_first(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_records(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchall.return_value = result_sets
self.assertEqual(result_sets, self.db_hook.get_records(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_pandas_df(self):
statement = 'SQL'
column = 'col'
result_sets = [('row1',), ('row2',)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook.get_pandas_df(statement)
self.assertEqual(column, df.columns[0])
self.assertEqual(result_sets[0][0], df.values.tolist()[0][0])
self.assertEqual(result_sets[1][0], df.values.tolist()[1][0])
self.cur.execute.assert_called_once_with(statement, None)
| apache-2.0 |
billy-inn/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
florentchandelier/zipline | tests/data/bundles/test_csvdir.py | 1 | 5092 | from __future__ import division
import numpy as np
import pandas as pd
from zipline.utils.calendars import get_calendar
from zipline.data.bundles import ingest, load, bundles
from zipline.testing import test_resource_path
from zipline.testing.fixtures import ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.functional import apply
class CSVDIRBundleTestCase(ZiplineTestCase):
symbols = 'AAPL', 'IBM', 'KO', 'MSFT'
asset_start = pd.Timestamp('2012-01-03', tz='utc')
asset_end = pd.Timestamp('2014-12-31', tz='utc')
bundle = bundles['csvdir']
calendar = get_calendar(bundle.calendar_name)
start_date = calendar.first_session
end_date = calendar.last_session
api_key = 'ayylmao'
columns = 'open', 'high', 'low', 'close', 'volume'
def _expected_data(self, asset_finder):
sids = {
symbol: asset_finder.lookup_symbol(
symbol,
self.asset_start,
).sid
for symbol in self.symbols
}
def per_symbol(symbol):
df = pd.read_csv(
test_resource_path('csvdir_samples', 'csvdir',
'daily', symbol + '.csv.gz'),
parse_dates=['date'],
index_col='date',
usecols=[
'open',
'high',
'low',
'close',
'volume',
'date',
'dividend',
'split',
],
na_values=['NA'],
)
df['sid'] = sids[symbol]
return df
all_ = pd.concat(map(per_symbol, self.symbols)).set_index(
'sid',
append=True,
).unstack()
# fancy list comprehension with statements
@list
@apply
def pricing():
for column in self.columns:
vs = all_[column].values
if column == 'volume':
vs = np.nan_to_num(vs)
yield vs
adjustments = [[5572, 5576, 5595, 5634, 5639, 5659, 5698, 5699,
5701, 5702, 5722, 5760, 5764, 5774, 5821, 5822,
5829, 5845, 5884, 5885, 5888, 5908, 5947, 5948,
5951, 5972, 6011, 6020, 6026, 6073, 6080, 6096,
6135, 6136, 6139, 6157, 6160, 6198, 6199, 6207,
6223, 6263, 6271, 6277],
[5572, 5576, 5595, 5634, 5639, 5659, 5698, 5699,
5701, 5702, 5722, 5760, 5764, 5774, 5821, 5822,
5829, 5845, 5884, 5885, 5888, 5908, 5947, 5948,
5951, 5972, 6011, 6020, 6026, 6073, 6080, 6096,
6135, 6136, 6139, 6157, 6160, 6198, 6199, 6207,
6223, 6263, 6271, 6277],
[5572, 5576, 5595, 5634, 5639, 5659, 5698, 5699,
5701, 5702, 5722, 5760, 5764, 5774, 5821, 5822,
5829, 5845, 5884, 5885, 5888, 5908, 5947, 5948,
5951, 5972, 6011, 6020, 6026, 6073, 6080, 6096,
6135, 6136, 6139, 6157, 6160, 6198, 6199, 6207,
6223, 6263, 6271, 6277],
[5572, 5576, 5595, 5634, 5639, 5659, 5698, 5699,
5701, 5702, 5722, 5760, 5764, 5774, 5821, 5822,
5829, 5845, 5884, 5885, 5888, 5908, 5947, 5948,
5951, 5972, 6011, 6020, 6026, 6073, 6080, 6096,
6135, 6136, 6139, 6157, 6160, 6198, 6199, 6207,
6223, 6263, 6271, 6277],
[5701, 6157]]
return pricing, adjustments
def test_bundle(self):
environ = {
'CSVDIR': test_resource_path('csvdir_samples', 'csvdir')
}
ingest('csvdir', environ=environ)
bundle = load('csvdir', environ=environ)
sids = 0, 1, 2, 3
assert_equal(set(bundle.asset_finder.sids), set(sids))
for equity in bundle.asset_finder.retrieve_all(sids):
assert_equal(equity.start_date, self.asset_start, msg=equity)
assert_equal(equity.end_date, self.asset_end, msg=equity)
sessions = self.calendar.all_sessions
actual = bundle.equity_daily_bar_reader.load_raw_arrays(
self.columns,
sessions[sessions.get_loc(self.asset_start, 'bfill')],
sessions[sessions.get_loc(self.asset_end, 'ffill')],
sids,
)
expected_pricing, expected_adjustments = self._expected_data(
bundle.asset_finder,
)
assert_equal(actual, expected_pricing, array_decimal=2)
adjustments_for_cols = bundle.adjustment_reader.load_adjustments(
self.columns,
sessions,
pd.Index(sids),
)
assert_equal([sorted(adj.keys()) for adj in adjustments_for_cols],
expected_adjustments)
| apache-2.0 |
milankl/swm | calc/misc/c_diss_plot.py | 1 | 3966 | from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
from netCDF4 import Dataset
import glob
import matplotlib.pyplot as plt
# OPTIONS
runfolder = [2,3]
## read data
for r,i in zip(runfolder,range(len(runfolder))):
runpath = path+'data/run%04i' % r
if i == 0:
u = np.load(runpath+'/u_sub.npy')
v = np.load(runpath+'/v_sub.npy')
h = np.load(runpath+'/h_sub.npy')
time = np.load(runpath+'/t_sub.npy')
print('run %i read.' % r)
else:
u = np.concatenate((u,np.load(runpath+'/u_sub.npy')))
v = np.concatenate((v,np.load(runpath+'/v_sub.npy')))
h = np.concatenate((h,np.load(runpath+'/h_sub.npy')))
time = np.hstack((time,np.load(runpath+'/t_sub.npy')))
print('run %i read.' % r)
t = time / 3600. / 24. # in days
## read param
global param
param = np.load(runpath+'/param.npy').all()
param['dat_type'] = np.float32
# import functions
exec(open(path+'swm_param.py').read())
exec(open(path+'swm_operators.py').read())
exec(open(path+'swm_output.py').read())
param['output'] = 0
set_grad_mat()
set_interp_mat()
set_lapl_mat()
set_coriolis()
tlen = len(time)
## create ouputfolder
try:
os.mkdir(runpath+'/analysis')
except:
pass
## reshape u,v
u = u.reshape((tlen,param['Nu'])).T
v = v.reshape((tlen,param['Nv'])).T
h = h.reshape((tlen,param['NT'])).T
print('Reshape done.')
##
dudx = Gux.dot(u)
dudy = Guy.dot(u)
dvdx = Gvx.dot(v)
dvdy = Gvy.dot(v)
n = 2
D = np.sqrt((dudx - dvdy)**2 + IqT.dot((dudy + dvdx)**2))
Ro = (D.T/f_T)
Rom = Ro.mean(axis=0)
c = (1/(1+Ro)**n).mean(axis=0)
# REYNOLDS, ROSSBY, EKMAN NUMBER MEAN
u_T = IuT.dot(u)
v_T = IvT.dot(v)
print('u,v interpolation done.')
#advective term
adv_u = u_T*Gux.dot(u) + v_T*IqT.dot(Guy.dot(u))
adv_v = u_T*IqT.dot(Gvx.dot(v)) + v_T*Gvy.dot(v)
del u_T,v_T
adv_term = np.sqrt(adv_u**2 + adv_v**2)
del adv_u, adv_v
print('Advection term done.')
#coriolis term
cor_term = (f_T*np.sqrt(IuT.dot(u**2) + IvT.dot(v**2)).T).T
print('Coriolis term done.')
Ro2 = adv_term / cor_term
c2 = (1/(1+Ro2)**n).mean(axis=1)
Ro2m = Ro2.mean(axis=1)
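# Note: Ro (|D|/f, labelled R_o^* in the figure) and Ro2 (advective over
# Coriolis acceleration, labelled R_o) feed the same scaling
# c = 1/(1+Ro)**n with n = 2; the panels below compare their means
# against a single snapshot.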
##
levs1 = np.linspace(0,.2,21)
levs2 = np.linspace(0.5,1,21)
fig,axs = plt.subplots(2,3,sharex=True,sharey=True,figsize=(9,5.5))
plt.tight_layout(rect=[-.02,-.03,1.12,.97],w_pad=0.1)
axs[0,0].contourf(param['x_T'],param['y_T'],h2mat(Ro2m),levs1)
axs[0,1].contourf(param['x_T'],param['y_T'],h2mat(Rom),levs1,extend='max')
m1 = axs[0,2].contourf(param['x_T'],param['y_T'],h2mat(Ro[-1,:]),levs1,extend='max')
plt.colorbar(m1,ax=(axs[0,0],axs[0,1],axs[0,2]),ticks=np.arange(0,.22,.04))
axs[1,0].contourf(param['x_T'],param['y_T'],h2mat(c2),levs2)
m21 = axs[1,0].contour(param['x_T'],param['y_T'],h2mat(c2),[0.8],linewidths=0.7)
axs[1,1].contourf(param['x_T'],param['y_T'],h2mat(c),levs2)
m2 = axs[1,2].contourf(param['x_T'],param['y_T'],h2mat(1/(1+Ro[-1,:])**n),levs2,extend='min')
axs[1,2].contour(param['x_T'],param['y_T'],h2mat(1/(1+Ro[-1,:])**n),[0.8],linewidths=0.7)
m22 = axs[1,1].contour(param['x_T'],param['y_T'],h2mat(c),[0.8],linewidths=0.7)
plt.colorbar(m2,ax=(axs[1,0],axs[1,1],axs[1,2]),ticks=np.arange(0.5,1.05,.05))
plt.clabel(m22, inline=1, fontsize=5,fmt='%.1f')
plt.clabel(m21, inline=1, fontsize=5,fmt='%.1f')
axs[0,0].set_xticks([])
axs[0,0].set_yticks([])
axs[0,0].set_title(r'$\overline{R_o} = \overline{\frac{|(\mathbf{u} \cdot \nabla)\mathbf{u}|}{|f\mathbf{u}|}}$')
axs[0,1].set_title(r'$\overline{R_o^*} = \overline{\frac{|D|}{f}}$')
axs[0,2].set_title(r'snapshot: $R_o^*$')
axs[1,0].set_title(r'$(1+\overline{R_o})^{-2}$')
axs[1,1].set_title(r'$(1+\overline{R_o}^*)^{-2}$')
axs[1,2].set_title(r'$(1+R_o^*)^{-2}$')
axs[0,0].set_ylabel('y')
axs[1,0].set_ylabel('y')
axs[1,0].set_xlabel('x')
axs[1,1].set_xlabel('x')
plt.savefig(path+'compare/Ro_scaling.png',dpi=150)
plt.close(fig)
#plt.show()
| gpl-3.0 |
Aasmi/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
OpenMined/PySyft | packages/syft/src/syft/lib/pandas/categorical_dtype.py | 1 | 1173 | # third party
import pandas as pd
# syft relative
from ...generate_wrapper import GenerateWrapper
from ...lib.python.list import List
from ...lib.python.primitive_factory import PrimitiveFactory
from ...proto.lib.pandas.categorical_pb2 import (
PandasCategoricalDtype as PandasCategoricalDtype_PB,
)
def object2proto(obj: pd.CategoricalDtype) -> PandasCategoricalDtype_PB:
# since pd.Index type is not integrated converted obj.categories to List
pd_cat_list = PrimitiveFactory.generate_primitive(value=obj.categories.tolist())
cat_list_proto = pd_cat_list._object2proto()
return PandasCategoricalDtype_PB(
id=cat_list_proto.id, categories=cat_list_proto, ordered=obj.ordered
)
def proto2object(proto: PandasCategoricalDtype_PB) -> pd.CategoricalDtype:
categories = List._proto2object(proto.categories).upcast()
ordered = proto.ordered
return pd.CategoricalDtype(categories=categories, ordered=ordered)
GenerateWrapper(
wrapped_type=pd.CategoricalDtype,
import_path="pandas.CategoricalDtype",
protobuf_scheme=PandasCategoricalDtype_PB,
type_object2proto=object2proto,
type_proto2object=proto2object,
)
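# The two converters above are intended to be inverses of each other; a
# hedged round-trip sketch (the example dtype is made up):
#
#     >>> dtype = pd.CategoricalDtype(categories=["a", "b"], ordered=True)  # doctest: +SKIP
#     >>> proto2object(object2proto(dtype)) == dtype                        # doctest: +SKIP
#     True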
| apache-2.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/datasets/statecrime/data.py | 3 | 2985 | #! /usr/bin/env python
"""Statewide Crime Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Public domain."""
TITLE = """Statewide Crime Data 2009"""
SOURCE = """
All data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below.
"""
DESCRSHORT = """State crime data 2009"""
DESCRLONG = DESCRSHORT
#suggested notes
NOTE = """
Number of observations: 51
Number of variables: 8
Variable name definitions:
state
All 50 states plus DC.
violent
Rate of violent crimes / 100,000 population. Includes murder, forcible
rape, robbery, and aggravated assault. Numbers for Illinois and Minnesota
do not include forcible rapes. Footnote included with the American
Statistical Abstract table reads:
"The data collection methodology for the offense of forcible
rape used by the Illinois and the Minnesota state Uniform Crime Reporting
(UCR) Programs (with the exception of Rockford, Illinois, and Minneapolis
and St. Paul, Minnesota) does not comply with national UCR guidelines.
Consequently, their state figures for forcible rape and violent crime (of
which forcible rape is a part) are not published in this table."
murder
Rate of murders / 100,000 population.
hs_grad
    Percent of population having graduated from high school or higher.
poverty
% of individuals below the poverty line
white
Percent of population that is one race - white only. From 2009 American
Community Survey
single
    Calculated from the 2009 1-year American Community Survey obtained
    from Census. Variable is Male householder, no wife present, family
    household combined with Female householder, no husband present, family
    household, divided by the total number of Family households.
urban
% of population in Urbanized Areas as of 2010 Census. Urbanized Areas are
    areas of 50,000 or more people."""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the statecrime data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=2, exog_idx=[7, 4, 3, 5],
dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=2, exog_idx=[7,4,3,5],
dtype=float, index_idx=0)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/statecrime.csv', 'rb'),
delimiter=",", names=True, dtype=None)
return data
| apache-2.0 |
asadziach/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
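# A hedged usage sketch (``x_df``, ``y_s`` and ``estimator`` are assumed
# names, not defined in this module):
#
#     >>> input_fn = pandas_input_fn(x=x_df, y=y_s, batch_size=32)  # doctest: +SKIP
#     >>> estimator.fit(input_fn=input_fn, steps=100)               # doctest: +SKIP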
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
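# A hedged illustration of the extraction above (the frame is made up;
# the exact numpy repr may vary by version):
#
#     >>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False]})  # doctest: +SKIP
#     >>> extract_pandas_data(df)                                # doctest: +SKIP
#     array([[ 1.,  1.],
#            [ 2.,  0.]])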
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
| apache-2.0 |
cbertinato/pandas | pandas/io/excel/_openpyxl.py | 1 | 14098 | from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import _validate_freeze_panes
class _OpenpyxlWriter(ExcelWriter):
engine = 'openpyxl'
supported_extensions = ('.xlsx', '.xlsm')
def __init__(self, path, engine=None, mode='w', **engine_kwargs):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super().__init__(path, mode=mode, **engine_kwargs)
if self.mode == 'a': # Load from existing workbook
from openpyxl import load_workbook
book = load_workbook(self.path)
self.book = book
else:
# Create workbook object with default optimized_write=True.
self.book = Workbook()
if self.book.worksheets:
try:
self.book.remove(self.book.worksheets[0])
except AttributeError:
# compat - for openpyxl <= 2.4
self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict : style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__getattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__getattribute__(key).__setattr__(nk, nv)
return xls_style
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import PatternFill, GradientFill
_pattern_fill_key_map = {
'patternType': 'fill_type',
'patterntype': 'fill_type',
'fgColor': 'start_color',
'fgcolor': 'start_color',
'bgColor': 'end_color',
'bgcolor': 'end_color',
}
_gradient_fill_key_map = {
'fill_type': 'type',
}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ['start_color', 'end_color']:
v = cls._convert_to_color(v)
if gk == 'stop':
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {
'diagonalup': 'diagonalUp',
'diagonaldown': 'diagonalDown',
}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
if k in ['left', 'right', 'top', 'bottom', 'diagonal']:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
return number_format_dict['format_code']
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using openpyxl.
sheet_name = self._get_sheet_name(sheet_name)
_style_cache = {}
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
if _validate_freeze_panes(freeze_panes):
wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1,
column=freeze_panes[1] + 1)
for cell in cells:
xcell = wks.cell(
row=startrow + cell.row + 1,
column=startcol + cell.col + 1
)
xcell.value, fmt = self._value_with_fmt(cell.val)
if fmt:
xcell.number_format = fmt
style_kwargs = {}
if cell.style:
key = str(cell.style)
style_kwargs = _style_cache.get(key)
if style_kwargs is None:
style_kwargs = self._convert_to_style_kwargs(cell.style)
_style_cache[key] = style_kwargs
if style_kwargs:
for k, v in style_kwargs.items():
setattr(xcell, k, v)
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_cells(
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
end_row=startrow + cell.mergestart + 1
)
# When cells are merged only the top-left cell is preserved
# The behaviour of the other cells in a merged range is
# undefined
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
xcell = wks.cell(column=col, row=row)
for k, v in style_kwargs.items():
setattr(xcell, k, v)
| bsd-3-clause |
moreati/pandashells | pandashells/lib/arg_lib.py | 7 | 6681 | from pandashells.lib import config_lib
def _check_for_recognized_args(*args):
"""
Raise an error if unrecognized argset is specified
"""
allowed_arg_set = set([
'io_in',
'io_out',
'example',
'xy_plotting',
'decorating',
])
in_arg_set = set(args)
unrecognized_set = in_arg_set - allowed_arg_set
if unrecognized_set:
msg = '{} not in allowed set {}'.format(unrecognized_set,
allowed_arg_set)
raise ValueError(msg)
def _io_in_adder(parser, config_dict, *args):
"""
Add input options to the parser
"""
in_arg_set = set(args)
if 'io_in' in in_arg_set:
group = parser.add_argument_group('Input Options')
# define the valid components
io_opt_list = ['csv', 'table', 'header', 'noheader']
# allow the option of supplying input column names
msg = 'Overwrite input column names with this list'
group.add_argument(
'--names', nargs='+', type=str, dest='names',
metavar="name", help=msg)
default_for_input = [
config_dict['io_input_type'],
config_dict['io_input_header']
]
msg = 'Must be one of {}'.format(repr(io_opt_list))
group.add_argument(
'-i', '--input_options', nargs='+', type=str, dest='input_options',
metavar='option', default=default_for_input, choices=io_opt_list,
help=msg)
def _io_out_adder(parser, config_dict, *args):
"""
Add output options to the parser
"""
in_arg_set = set(args)
if 'io_out' in in_arg_set:
group = parser.add_argument_group('Output Options')
# define the valid components
io_opt_list = [
'csv', 'table', 'html', 'header', 'noheader', 'index', 'noindex',
]
# define the current defaults
default_for_output = [
config_dict['io_output_type'],
config_dict['io_output_header'],
config_dict['io_output_index']
]
# show the current defaults in the arg parser
msg = 'Must be one of {}'.format(repr(io_opt_list))
group.add_argument(
'-o', '--output_options', nargs='+',
type=str, dest='output_options', metavar='option',
default=default_for_output, help=msg)
msg = (
'Replace NaNs with this string. '
'A string containing \'nan\' will set na_rep to numpy NaN. '
'Current default is {}'
).format(repr(str(config_dict['io_output_na_rep'])))
group.add_argument(
'--output_na_rep', nargs=1, type=str, dest='io_output_na_rep',
help=msg)
def _decorating_adder(parser, *args):
in_arg_set = set(args)
if 'decorating' in in_arg_set:
# get a list of valid plot styling info
context_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_context'][0][1]
theme_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_theme'][0][1]
palette_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_palette'][0][1]
group = parser.add_argument_group('Plot specific Options')
msg = "Set the x-limits for the plot"
group.add_argument(
'--xlim', nargs=2, type=float, dest='xlim',
metavar=('XMIN', 'XMAX'), help=msg)
msg = "Set the y-limits for the plot"
group.add_argument(
'--ylim', nargs=2, type=float, dest='ylim',
metavar=('YMIN', 'YMAX'), help=msg)
msg = "Draw x axis with log scale"
group.add_argument(
'--xlog', action='store_true', dest='xlog', default=False,
help=msg)
msg = "Draw y axis with log scale"
group.add_argument(
'--ylog', action='store_true', dest='ylog', default=False,
help=msg)
msg = "Set the x-label for the plot"
group.add_argument(
'--xlabel', nargs=1, type=str, dest='xlabel', help=msg)
msg = "Set the y-label for the plot"
group.add_argument(
'--ylabel', nargs=1, type=str, dest='ylabel', help=msg)
msg = "Set the title for the plot"
group.add_argument(
'--title', nargs=1, type=str, dest='title', help=msg)
msg = "Specify legend location"
group.add_argument(
'--legend', nargs=1, type=str, dest='legend',
choices=['1', '2', '3', '4', 'best'], help=msg)
msg = "Specify whether hide the grid or not"
group.add_argument(
'--nogrid', action='store_true', dest='no_grid', default=False,
help=msg)
msg = "Specify plot context. Default = '{}' ".format(context_list[0])
group.add_argument(
'--context', nargs=1, type=str, dest='plot_context',
default=[context_list[0]], choices=context_list, help=msg)
msg = "Specify plot theme. Default = '{}' ".format(theme_list[0])
group.add_argument(
'--theme', nargs=1, type=str, dest='plot_theme',
default=[theme_list[0]], choices=theme_list, help=msg)
msg = "Specify plot palette. Default = '{}' ".format(palette_list[0])
group.add_argument(
'--palette', nargs=1, type=str, dest='plot_palette',
default=[palette_list[0]], choices=palette_list, help=msg)
msg = "Save the figure to this file"
group.add_argument('--savefig', nargs=1, type=str, help=msg)
def _xy_adder(parser, *args):
in_arg_set = set(args)
if 'xy_plotting' in in_arg_set:
msg = 'Column to plot on x-axis'
parser.add_argument(
'-x', nargs=1, type=str, dest='x', metavar='col', help=msg)
msg = 'List of columns to plot on y-axis'
parser.add_argument(
'-y', nargs='+', type=str, dest='y', metavar='col', help=msg)
msg = "Plot style(s) defaults to .-"
parser.add_argument(
'-s', '--style', nargs='+', type=str, dest='style', default=['.-'],
help=msg, metavar='style')
def add_args(parser, *args):
"""Adds argument blocks to the arg parser
:type parser: argparse instance
:param parser: The argarse instance to use in adding arguments
Additinional arguments are the names of argument blocks to add
"""
config_dict = config_lib.get_config()
_check_for_recognized_args(*args)
_io_in_adder(parser, config_dict, *args)
_io_out_adder(parser, config_dict, *args)
_decorating_adder(parser, *args)
_xy_adder(parser, *args)
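# A hedged usage sketch (the parser construction below is illustrative and
# not part of this module):
#
#     >>> import argparse                                     # doctest: +SKIP
#     >>> parser = argparse.ArgumentParser(description='example tool')
#     >>> add_args(parser, 'io_in', 'io_out', 'decorating')
#     >>> args = parser.parse_args()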
| bsd-2-clause |
mykoz/ThinkStats2 | code/thinkstats2.py | 68 | 68825 | """This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
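# A short illustration of Interpolator (the values are chosen for the
# example only):
#
#     >>> interp = Interpolator([0, 10], [0, 100])
#     >>> interp.Lookup(5)
#     50.0
#     >>> interp.Reverse(50)
#     5.0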
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
if min(self.d.keys()) is np.nan:
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
        Returns: the value with the highest probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
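# Illustrative sketch (the numbers and the helper name are ours, not part of
# the original module): build a Joint from two independent Pmfs with
# MakeJoint, then recover a marginal and a conditional distribution.
def _example_joint():
    pmf1 = Pmf({1: 0.5, 2: 0.5})
    pmf2 = Pmf({10: 0.3, 20: 0.7})
    joint = MakeJoint(pmf1, pmf2)
    marginal = joint.Marginal(0)               # distribution of the first variable
    conditional = joint.Conditional(0, 1, 20)  # first variable given second == 20
    return marginal, conditional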
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix.Incr(x, p1 * p2)
return mix
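# Illustrative sketch (rates are arbitrary): a meta-Pmf whose values are
# themselves Pmfs, collapsed into a single mixture by MakeMixture.
def _example_mixture():
    metapmf = Pmf()
    metapmf.Set(MakePoissonPmf(3.0, high=15), 0.6)
    metapmf.Set(MakePoissonPmf(6.0, high=15), 0.4)
    mix = MakeMixture(metapmf, label='poisson mix')
    return mix.Mean()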
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
    high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
class Cdf(object):
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else '_nolegend_'
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
        self.ps = np.cumsum(freqs, dtype=float)
self.ps /= self.ps[-1]
def __str__(self):
return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
__repr__ = __str__
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Values(self):
"""Returns a sorted list of values.
"""
return self.xs
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0.0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def ValueArray(self, ps):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100.0)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100.0
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000.0):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
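# Illustrative sketch (sample values are arbitrary): a typical round trip
# through a Cdf -- build it from raw values, query it, and resample from it.
def _example_cdf_usage():
    cdf = Cdf([1, 2, 2, 3, 5], label='toy sample')
    p = cdf.Prob(2)            # P(X <= 2)
    x = cdf.Percentile(75)     # 75th percentile
    sample = cdf.Sample(1000)  # NumPy array drawn from the empirical CDF
    return p, x, sample.mean()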
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
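# Illustrative sketch (a made-up dice problem, class and function names are
# ours): Suite is meant to be subclassed with a Likelihood method; Update
# then applies Bayes's rule across the hypotheses.
class _ExampleDiceSuite(Suite):
    """Posterior over the number of sides of a die, given observed rolls."""
    def Likelihood(self, data, hypo):
        # data: one observed roll; hypo: hypothesized number of sides
        return 0 if data > hypo else 1.0 / hypo
def _example_suite_update():
    suite = _ExampleDiceSuite([4, 6, 8, 12, 20])
    for roll in [6, 8, 7, 7, 5, 4]:
        suite.Update(roll)
    return suite.MaximumLikelihood()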
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
If options includes low and high, it must also include n;
in that case the density is evaluated an n locations between
low and high, including both.
        If options includes xs, the density is evaluated at those locations.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
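# Illustrative sketch: the exact distribution of the sum of two fair dice via
# Pmf addition, next to the Monte Carlo approximation from SampleSum.
def _example_sum_of_dice():
    die = Pmf(range(1, 7))
    exact = die + die                     # enumerates all pairs via AddPmf
    approx = SampleSum([die, die], 1000)  # sampled approximation
    return exact.Mean(), approx.Mean()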
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
    Returns the probability of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
    Returns the probability of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / special.gamma(k+1)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = EvalPoissonPmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
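# Illustrative sketch (lam and the bound are arbitrary): discretize an
# exponential distribution and compare its CDF at one point with the
# closed-form EvalExponentialCdf.
def _example_exponential():
    pmf = MakeExponentialPmf(lam=0.5, high=20)
    cdf = pmf.MakeCdf()
    return cdf.Prob(5), EvalExponentialCdf(5, lam=0.5)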
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
    return stats.lognorm.cdf(x, sigma, scale=np.exp(mu))
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta(object):
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
"""
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = [special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
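# Illustrative sketch (the counts are made up): conjugate update of a uniform
# Beta prior with observed heads/tails, then a discrete view of the posterior.
def _example_beta_update():
    beta = Beta(1.0, 1.0, label='uniform prior')
    beta.Update((140, 110))        # 140 heads, 110 tails
    posterior = beta.MakePmf(label='posterior')
    return beta.Mean(), posterior.MaximumLikelihood()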
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
        self.params = np.ones(n, dtype=float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
        return Pmf(dict(zip(xs, ps)), label=label)
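# Illustrative sketch (observation counts are made up): Dirichlet update for
# three categories and the Beta marginal of the first one.
def _example_dirichlet_update():
    dirichlet = Dirichlet(3)
    dirichlet.Update((5, 2, 1))
    marginal = dirichlet.MarginalBeta(0)
    return marginal.Mean()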
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
return scipy.misc.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.uniform(-jitter, +jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
n = int(p * len(t))
t = sorted(t)[n:-n]
return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
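# Illustrative sketch (synthetic groups): Cohen's d for two simulated samples
# whose true means differ by half a standard deviation.
def _example_cohen_d():
    group1 = np.random.normal(0.0, 1.0, 1000)
    group2 = np.random.normal(0.5, 1.0, 1000)
    return CohenEffectSize(group1, group2)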
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
xs: sequence of x
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
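# Illustrative sketch (synthetic data): least squares, residuals, and R^2
# chained together the way the functions above are meant to be used.
def _example_least_squares():
    xs = np.linspace(0, 10, 50)
    ys = 2.0 + 3.0 * xs + np.random.normal(0, 1, 50)
    inter, slope = LeastSquares(xs, ys)
    res = Residuals(xs, ys, inter, slope)
    r2 = CoefDetermination(ys, res)
    return inter, slope, r2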
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
"""Computes the interquartile of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pair of int
        self.colspecs = self.colspecs.astype(int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float, double=float)
var_info = []
for line in open(dct_file, **options):
match = re.search( r'_column\(([^)]*)\)', line)
if match:
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
    returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column]
cdf = Cdf(dict(weights))
indices = cdf.Sample(len(weights))
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
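# Illustrative sketch (simulated random walks): a 90% band across simulated
# trajectories, which is the typical use of PercentileRows.
def _example_percentile_band():
    ys_seq = [np.cumsum(np.random.normal(0, 1, 100)) for _ in range(200)]
    low, high = PercentileRows(ys_seq, [5, 95])
    return low, high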
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return ndimage.filters.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
"""Draws a Cdf with vertical lines at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
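# Illustrative sketch (a permutation test on made-up data; the class and
# function names are ours): the intended use of HypothesisTest is to subclass
# it and fill in TestStatistic, MakeModel, and RunModel; PValue then
# simulates the distribution of the test statistic under the null.
class _ExampleDiffMeansPermute(HypothesisTest):
    """Tests an observed difference in group means by permutation."""
    def TestStatistic(self, data):
        group1, group2 = data
        return abs(group1.mean() - group2.mean())
    def MakeModel(self):
        group1, group2 = self.data
        self.n = len(group1)
        self.pool = np.hstack((group1, group2))
    def RunModel(self):
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]
def _example_hypothesis_test():
    data = np.random.normal(0, 1, 100), np.random.normal(0.3, 1, 100)
    ht = _ExampleDiffMeansPermute(data)
    return ht.PValue(iters=1000)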
def main():
pass
if __name__ == '__main__':
main()
| gpl-3.0 |
paulorauber/nn | examples/rnn.py | 1 | 2389 | import numpy as np
from sklearn.utils import check_random_state
from nn.model.recurrent import RecurrentNetwork
random_state = check_random_state(None)
def nback(n, k, length):
"""Random n-back targets given n, number of digits k and sequence length"""
Xi = random_state.randint(k, size=length)
yi = np.zeros(length, dtype=int)
for t in range(n, length):
yi[t] = (Xi[t - n] == Xi[t])
return Xi, yi
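# Illustrative sketch (parameters are arbitrary): what one n-back sequence and
# its targets look like before one-of-k encoding.
def example_nback_sequence():
    Xi, yi = nback(n=2, k=4, length=12)
    # yi[t] is 1 exactly where Xi[t] repeats the digit seen n steps earlier
    return Xi, yi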
def one_of_k(Xi_, k):
Xi = np.zeros((len(Xi_), k))
for t, Xit in np.ndenumerate(Xi_):
Xi[t, Xit] = 1
return Xi
def nback_dataset(n_sequences, mean_length, std_length, n, k):
X, y = [], []
for _ in range(n_sequences):
length = random_state.normal(loc=mean_length, scale=std_length)
length = int(max(n + 1, length))
Xi_, yi = nback(n, k, length)
Xi = one_of_k(Xi_, k)
X.append(Xi)
y.append(yi)
return X, y
def nback_example():
# Input dimension
k = 4
# n-back
n = 3
n_sequences = 100
mean_length = 20
std_length = 5
# Training
Xtrain, ytrain = nback_dataset(n_sequences, mean_length, std_length, n, k)
rnn = RecurrentNetwork(64, learning_rate=2.0, n_epochs=30,
lmbda=0.0, mu=0.2, output_activation='softmax',
random_state=None, verbose=1)
rnn.fit(Xtrain, ytrain)
# Evaluating
Xtest, ytest = nback_dataset(5*n_sequences, 5*mean_length, 5*std_length, n, k)
print('Average accuracy: {0:.3f}'.format(rnn.score(Xtest, ytest)))
acc_zeros = 0.0
for yi in ytest:
acc_zeros += float((yi == 0).sum()) / len(yi)
acc_zeros /= len(ytest)
print('Negative guess accuracy: {0:.3f}'.format(acc_zeros))
# Example
Xi_ = [3, 2, 1, 3, 2, 1, 3, 2, 2, 1, 2, 3, 1, 2, 0, 0, 2, 0]
print('\nExample sequence: {0}'.format(Xi_))
yi = np.zeros(len(Xi_), dtype=int)
for t in range(n, len(Xi_)):
yi[t] = (Xi_[t - n] == Xi_[t])
Xi = one_of_k(Xi_, k)
yipred = rnn.predict([Xi])[0]
print('Correct: \t{0}'.format(yi))
print('Predicted: \t{0}'.format(yipred))
print('Accuracy: {0:.3f}'.format(float((yi == yipred).sum())/len(yi)))
def main():
nback_example()
if __name__ == "__main__":
main() | mit |
agopalak/football_pred | pre_proc/proc_data.py | 1 | 4667 |
import sys
import yaml
import re
import datetime as DT
import logging
from rainbow_logging_handler import RainbowLoggingHandler
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn_pandas import DataFrameMapper
# Capturing current module. Needed to call getattr on this module
this_module = sys.modules[__name__]
# Setup logging module
# TODO: Figure out a standard way to install/handle logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(filename)s:%(lineno)4s - %(funcName)15s()] %(levelname)8s: %(message)s')
# Setup RainbowLoggingHandler
handler = RainbowLoggingHandler(sys.stderr, color_funcName=('black', 'yellow', True))
handler.setFormatter(formatter)
logger.addHandler(handler)
# Converting Boolean to String during YAML load
# Done to workaround quirkness with PyYAML
def bool_constructor(self, node):
value = self.construct_yaml_bool(node)
if value == False:
return 'False'
else:
return 'True'
yaml.Loader.add_constructor(u'tag:yaml.org,2002:bool', bool_constructor)
yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:bool', bool_constructor)
# Load data from CSV, configuration file
# Process data and provide input/output data frames
def load_data(data_csv, data_cfg):
# Load Data YAML configuration file
with open(data_cfg, 'r') as yf:
        data = yaml.load(yf, Loader=yaml.Loader)
# Read CSV into data frame
df = pd.read_csv(data_csv)
# Filling holes with zeros
df.fillna(0, inplace=True)
# Process Columns
for item in data:
        if item['include'] == 'False':
continue
else:
colnum = item['column']
logger.info('Processing Column %s', colnum)
# Create a column data frame
col_df = df.iloc[:, [colnum-1]].copy()
logger.debug(col_df.columns)
logger.debug('Preprocess Column Input\n%s', col_df.head())
# Apply transformations
col_df = do_transform(col_df, item['transform'])
logger.debug('Preprocess Column Output\n%s', col_df.head())
# Perform Data Transformations
def do_transform(df, tf):
for func in tf:
        funckey, funcval = list(func.items())[0]
# Getting transformation call name
transform = getattr(this_module, funckey, None)
# Splitting funcval to individual function arguments
# First argument is True/False to indicate if transform is called
try:
            pattern = re.compile(r'\s*,\s*')
funcvals = pattern.split(funcval)
logger.debug('Funcvals --> %s', funcvals)
except AttributeError:
funcvals = [funcval]
# Calling transformation
if funcvals[0] == 'True':
try:
logger.debug('Funckey --> %s', funckey)
df = transform(df, funcvals[1:])
except AttributeError:
logger.error('Function %s has not been implemented!', funckey)
return df
# Performs feature scaling on data frame
# TODO: scale - Add implementation to handle val
def scale(df, val):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
mms = preprocessing.MinMaxScaler()
return pd.DataFrame(mms.fit_transform(df.values.ravel().reshape(-1, 1)), columns=df.columns)
# conv2num: Converts column data to ordered integers
# TODO: conv2num - Add implementation to handle args
def conv2num(df, args):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
le = preprocessing.LabelEncoder()
return pd.DataFrame(le.fit_transform(df.values.ravel()), columns=df.columns)
# conv2bin: Converts column data to binary
# TODO: conv2bin - Add implementation to handle args
def conv2bin(df, args):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
le = preprocessing.LabelBinarizer()
return pd.DataFrame(le.fit_transform(df.values.ravel()), columns=df.columns)
# conv2timedelta: Converts column data to age
# TODO: conv2timedelta - Current returns in years. May need to make it more scalable
def conv2timedelta(df, args):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
if args[1] == 'now':
refdate = pd.Timestamp(DT.datetime.now())
else:
refdate = pd.Timestamp(DT.datetime.strptime(args[1], args[0]))
logger.debug('Reference date is: %s', refdate)
df = pd.DataFrame((refdate - pd.to_datetime(df.values.ravel())), columns=df.columns)
return df.apply(lambda x: (x/np.timedelta64(1, 'Y')).astype(int))
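# Illustrative sketch (a toy frame, not the real football data): how the
# individual transforms above behave on single-column DataFrames.
def example_transforms():
    teams = pd.DataFrame({'team': ['NYG', 'DAL', 'PHI', 'DAL']})
    as_num = conv2num(teams, [])  # ordered integer codes
    home = pd.DataFrame({'is_home': ['yes', 'no', 'no', 'yes']})
    as_bin = conv2bin(home, [])   # 0/1 encoding of a binary column
    scores = pd.DataFrame({'score': [3.0, 17.0, 24.0, 31.0]})
    scaled = scale(scores, [])    # min-max scaled to [0, 1]
    return as_num, as_bin, scaled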
# Main Program
if __name__ == '__main__':
load_data('nflData.csv', 'datacfg.yaml')
| mit |
alexeyum/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
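# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the out-of-core usage
# pattern that test_incremental_pca_partial_fit exercises, i.e. streaming
# mini-batches through partial_fit instead of fitting all at once. The chunk
# size of 10 is arbitrary; this helper is not invoked by the tests.
def _example_out_of_core_incremental_pca():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    ipca = IncrementalPCA(n_components=2)
    for start in range(0, X.shape[0], 10):
        ipca.partial_fit(X[start:start + 10])  # one mini-batch at a time
    return ipca.transform(X)  # behaves like a fully fitted estimator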
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | 3 | 12567 | """Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,
RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
# Compare analytic and numeric gradient of kernels.
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
# Check that parameter vector theta of kernel is set correctly.
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s[0:-len("_bounds")],
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i + 1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i + 1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
# Auto-correlation and cross-correlation should be consistent.
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
# Test that diag method of kernel returns consistent results.
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
# Adding kernels and multiplying kernels should be commutative.
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
# Anisotropic kernel should be consistent with isotropic kernels.
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
# Test stationarity of kernels.
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def check_hyperparameters_equal(kernel1, kernel2):
# Check that hyperparameters of two kernels are equal
for attr in set(dir(kernel1) + dir(kernel2)):
if attr.startswith("hyperparameter_"):
attr_value1 = getattr(kernel1, attr)
attr_value2 = getattr(kernel2, attr)
assert_equal(attr_value1, attr_value2)
def test_kernel_clone():
# Test that sklearn's clone works correctly on kernels.
bounds = (1e-5, 1e5)
for kernel in kernels:
kernel_cloned = clone(kernel)
# XXX: Should this be fixed?
        # This differs from sklearn's estimator equality check.
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
# Check that all constructor parameters are equal.
assert_equal(kernel.get_params(), kernel_cloned.get_params())
# Check that all hyperparameters are equal.
yield check_hyperparameters_equal, kernel, kernel_cloned
# This test is to verify that using set_params does not
# break clone on kernels.
# This used to break because in kernels such as the RBF, non-trivial
# logic that modified the length scale used to be in the constructor
# See https://github.com/scikit-learn/scikit-learn/issues/6961
# for more details.
params = kernel.get_params()
# RationalQuadratic kernel is isotropic.
isotropic_kernels = (ExpSineSquared, RationalQuadratic)
if 'length_scale' in params and not isinstance(kernel,
isotropic_kernels):
length_scale = params['length_scale']
if np.iterable(length_scale):
params['length_scale'] = length_scale[0]
params['length_scale_bounds'] = bounds
else:
params['length_scale'] = [length_scale] * 2
params['length_scale_bounds'] = bounds * 2
kernel_cloned.set_params(**params)
kernel_cloned_clone = clone(kernel_cloned)
assert_equal(kernel_cloned_clone.get_params(),
kernel_cloned.get_params())
assert_not_equal(id(kernel_cloned_clone), id(kernel_cloned))
yield (check_hyperparameters_equal, kernel_cloned,
kernel_cloned_clone)
def test_matern_kernel():
# Test consistency of Matern kernel for special values of nu.
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
# Check that GP kernels can also be used as pairwise kernels.
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
# Check that set_params()/get_params() is consistent with kernel.theta.
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value] * size})
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
[value] * size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
def test_repr_kernels():
# Smoke-test for repr in kernels.
for kernel in kernels:
repr(kernel)
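# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the relationship between
# kernel hyperparameters and ``theta`` (their log-transformed values) that
# test_kernel_theta and test_set_get_params rely on. This helper is not
# invoked by the tests.
def _example_kernel_theta_roundtrip():
    kernel = 2.0 * RBF(length_scale=0.5)
    # theta concatenates the logs of the free hyperparameters, in order:
    # [log(constant_value), log(length_scale)]
    assert_array_almost_equal(kernel.theta, np.log([2.0, 0.5]))
    # assigning theta updates the underlying hyperparameters consistently
    kernel.theta = np.log([4.0, 1.0])
    assert_almost_equal(kernel.k2.length_scale, 1.0)
    return kernel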
| bsd-3-clause |
wilsonkichoi/zipline | zipline/data/data_portal.py | 1 | 64491 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import mul
import bcolz
from logbook import Logger
import numpy as np
import pandas as pd
from pandas.tslib import normalize_date
from six import iteritems
from six.moves import reduce
from zipline.assets import Asset, Future, Equity
from zipline.data.us_equity_pricing import NoDataOnDate
from zipline.data.us_equity_loader import (
USEquityDailyHistoryLoader,
USEquityMinuteHistoryLoader,
)
from zipline.utils import tradingcalendar
from zipline.utils.math_utils import (
nansum,
nanmean,
nanstd
)
from zipline.utils.memoize import remember_last, weak_lru_cache
from zipline.errors import (
NoTradeDataAvailableTooEarly,
NoTradeDataAvailableTooLate,
HistoryWindowStartsBeforeData,
)
log = Logger('DataPortal')
BASE_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price", "last_traded"
])
OHLCV_FIELDS = frozenset([
"open", "high", "low", "close", "volume"
])
OHLCVP_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price"
])
HISTORY_FREQUENCIES = set(["1m", "1d"])
class DailyHistoryAggregator(object):
"""
Converts minute pricing data into a daily summary, to be used for the
last slot in a call to history with a frequency of `1d`.
This summary is the same as a daily bar rollup of minute data, with the
distinction that the summary is truncated to the `dt` requested.
    i.e. the aggregation slides forward during the course of the simulation day.
Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
    The aggregation rules for each price type are documented in their
    respective methods.
"""
def __init__(self, market_opens, minute_reader):
self._market_opens = market_opens
self._minute_reader = minute_reader
# The caches are structured as (date, market_open, entries), where
# entries is a dict of asset -> (last_visited_dt, value)
#
# Whenever an aggregation method determines the current value,
# the entry for the respective asset should be overwritten with a new
# entry for the current dt.value (int) and aggregation value.
#
# When the requested dt's date is different from date the cache is
# flushed, so that the cache entries do not grow unbounded.
#
# Example cache:
# cache = (date(2016, 3, 17),
# pd.Timestamp('2016-03-17 13:31', tz='UTC'),
# {
# 1: (1458221460000000000, np.nan),
# 2: (1458221460000000000, 42.0),
# })
self._caches = {
'open': None,
'high': None,
'low': None,
'close': None,
'volume': None
}
# The int value is used for deltas to avoid extra computation from
# creating new Timestamps.
self._one_min = pd.Timedelta('1 min').value
def _prelude(self, dt, field):
date = dt.date()
dt_value = dt.value
cache = self._caches[field]
if cache is None or cache[0] != date:
market_open = self._market_opens.loc[date]
cache = self._caches[field] = (dt.date(), market_open, {})
_, market_open, entries = cache
if dt != market_open:
prev_dt = dt_value - self._one_min
else:
prev_dt = None
return market_open, prev_dt, dt_value, entries
def opens(self, assets, dt):
"""
The open field's aggregation returns the first value that occurs
        for the day; if there has been no data on or before the `dt`, the open
is `nan`.
Once the first non-nan open is seen, that value remains constant per
asset for the remainder of the day.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
opens = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
opens.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'open')
entries[asset] = (dt_value, val)
opens.append(val)
continue
else:
try:
last_visited_dt, first_open = entries[asset]
if last_visited_dt == dt_value:
opens.append(first_open)
continue
elif not pd.isnull(first_open):
opens.append(first_open)
entries[asset] = (dt_value, first_open)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['open'],
after_last,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['open'],
market_open,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
return np.array(opens)
def highs(self, assets, dt):
"""
The high field's aggregation returns the largest high seen between
the market open and the current dt.
If there has been no data on or before the `dt` the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
highs = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
highs.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'high')
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
try:
last_visited_dt, last_max = entries[asset]
if last_visited_dt == dt_value:
highs.append(last_max)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'high')
if pd.isnull(curr_val):
val = last_max
elif pd.isnull(last_max):
val = curr_val
else:
val = max(last_max, curr_val)
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['high'],
after_last,
dt,
[asset],
)[0].T
val = max(last_max, np.nanmax(window))
entries[asset] = (dt_value, val)
highs.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['high'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmax(window)
entries[asset] = (dt_value, val)
highs.append(val)
continue
return np.array(highs)
def lows(self, assets, dt):
"""
The low field's aggregation returns the smallest low seen between
the market open and the current dt.
If there has been no data on or before the `dt` the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
lows = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
lows.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'low')
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
try:
last_visited_dt, last_min = entries[asset]
if last_visited_dt == dt_value:
lows.append(last_min)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'low')
val = np.nanmin([last_min, curr_val])
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['low'],
after_last,
dt,
[asset],
)[0].T
window_min = np.nanmin(window)
if pd.isnull(window_min):
val = last_min
else:
val = min(last_min, window_min)
entries[asset] = (dt_value, val)
lows.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['low'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmin(window)
entries[asset] = (dt_value, val)
lows.append(val)
continue
return np.array(lows)
def closes(self, assets, dt):
"""
The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
If there has been no data on or before the `dt` the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
closes = []
normalized_dt = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_dt, True):
closes.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'close')
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
try:
last_visited_dt, last_close = entries[asset]
if last_visited_dt == dt_value:
closes.append(last_close)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = last_close
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = self.closes(
[asset],
pd.Timestamp(prev_dt, tz='UTC'))[0]
entries[asset] = (dt_value, val)
closes.append(val)
continue
except KeyError:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = self.closes([asset],
pd.Timestamp(prev_dt, tz='UTC'))[0]
entries[asset] = (dt_value, val)
closes.append(val)
continue
return np.array(closes)
def volumes(self, assets, dt):
"""
The volume field's aggregation returns the sum of all volumes
between the market open and the `dt`
If there has been no data on or before the `dt` the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
volumes = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
volumes.append(0)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'volume')
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
try:
last_visited_dt, last_total = entries[asset]
if last_visited_dt == dt_value:
volumes.append(last_total)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'volume')
val += last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['volume'],
after_last,
dt,
[asset],
)[0]
val = np.nansum(window) + last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['volume'],
market_open,
dt,
[asset],
)[0]
val = np.nansum(window)
entries[asset] = (dt_value, val)
volumes.append(val)
continue
return np.array(volumes)
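# ---------------------------------------------------------------------------
# Illustrative sketch (not part of zipline): the minute-to-"daily so far"
# roll-up that DailyHistoryAggregator implements, expressed with plain pandas
# on a toy frame. Field names mirror the aggregator; the sample values are
# made up, and this helper is never called by zipline itself.
def _example_daily_rollup_of_minutes():
    minutes = pd.date_range('2016-03-17 13:31', periods=5, freq='T', tz='UTC')
    bars = pd.DataFrame({'open': [np.nan, 11.0, 11.5, 12.0, 13.0],
                         'high': [np.nan, 12.0, 11.5, 13.0, 14.0],
                         'low': [np.nan, 10.0, 10.5, 11.0, 12.0],
                         'close': [np.nan, 11.0, 11.5, 12.0, 13.0],
                         'volume': [0, 200, 150, 150, 50]},
                        index=minutes)
    return {'open': bars['open'].dropna().iloc[0],  # first non-nan open
            'high': bars['high'].max(),             # running max (skips nan)
            'low': bars['low'].min(),               # running min (skips nan)
            'close': bars['close'].iloc[-1],        # latest close
            'volume': bars['volume'].sum()}         # running sum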
class DataPortal(object):
"""Interface to all of the data that a zipline simulation needs.
This is used by the simulation runner to answer questions about the data,
like getting the prices of assets on a given day or to service history
calls.
Parameters
----------
env : TradingEnvironment
The trading environment for the simulation. This includes the trading
calendar and benchmark data.
first_trading_day : pd.Timestamp
The first trading day for the simulation.
equity_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for equities. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
equity_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for equities. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
future_daily_reader : BcolzDailyBarReader, optional
        The daily bar reader for futures. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
future_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for futures. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
adjustment_reader : SQLiteAdjustmentWriter, optional
The adjustment reader. This is used to apply splits, dividends, and
other adjustment data to the raw data from the readers.
"""
def __init__(self,
env,
first_trading_day,
equity_daily_reader=None,
equity_minute_reader=None,
future_daily_reader=None,
future_minute_reader=None,
adjustment_reader=None):
self.env = env
self.views = {}
self._asset_finder = env.asset_finder
self._carrays = {
'open': {},
'high': {},
'low': {},
'close': {},
'volume': {},
'sid': {},
}
self._adjustment_reader = adjustment_reader
# caches of sid -> adjustment list
self._splits_dict = {}
self._mergers_dict = {}
self._dividends_dict = {}
# Cache of sid -> the first trading day of an asset.
self._asset_start_dates = {}
self._asset_end_dates = {}
# Handle extra sources, like Fetcher.
self._augmented_sources_map = {}
self._extra_source_df = None
self._equity_daily_reader = equity_daily_reader
if self._equity_daily_reader is not None:
self._equity_history_loader = USEquityDailyHistoryLoader(
self.env,
self._equity_daily_reader,
self._adjustment_reader
)
self._equity_minute_reader = equity_minute_reader
self._future_daily_reader = future_daily_reader
self._future_minute_reader = future_minute_reader
self._first_trading_day = first_trading_day
if self._equity_minute_reader is not None:
self._equity_daily_aggregator = DailyHistoryAggregator(
self.env.open_and_closes.market_open,
self._equity_minute_reader)
self._equity_minute_history_loader = USEquityMinuteHistoryLoader(
self.env,
self._equity_minute_reader,
self._adjustment_reader
)
self.MINUTE_PRICE_ADJUSTMENT_FACTOR = \
self._equity_minute_reader._ohlc_inverse
def _reindex_extra_source(self, df, source_date_index):
return df.reindex(index=source_date_index, method='ffill')
def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.env.days_in_range(
start=sim_params.period_start,
end=sim_params.period_end
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
        # any given time. Gets overwritten every time there's a new fetcher
# call
extra_source_df = pd.DataFrame()
for identifier, df in iteritems(group_dict):
# Before reindexing, save the earliest and latest dates
earliest_date = df.index[0]
latest_date = df.index[-1]
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
if not isinstance(identifier, Asset):
# for fake assets we need to store a start/end date
self._asset_start_dates[identifier] = earliest_date
self._asset_end_dates[identifier] = latest_date
for col_name in df.columns.difference(['sid']):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df
def _open_minute_file(self, field, asset):
sid_str = str(int(asset))
try:
carray = self._carrays[field][sid_str]
except KeyError:
carray = self._carrays[field][sid_str] = \
self._get_ctable(asset)[field]
return carray
def _get_ctable(self, asset):
sid = int(asset)
if isinstance(asset, Future):
if self._future_minute_reader.sid_path_func is not None:
path = self._future_minute_reader.sid_path_func(
self._future_minute_reader.rootdir, sid
)
else:
path = "{0}/{1}.bcolz".format(
self._future_minute_reader.rootdir, sid)
elif isinstance(asset, Equity):
if self._equity_minute_reader.sid_path_func is not None:
path = self._equity_minute_reader.sid_path_func(
self._equity_minute_reader.rootdir, sid
)
else:
path = "{0}/{1}.bcolz".format(
self._equity_minute_reader.rootdir, sid)
else:
# TODO: Figure out if assets should be allowed if neither, and
# why this code path is being hit.
if self._equity_minute_reader.sid_path_func is not None:
path = self._equity_minute_reader.sid_path_func(
self._equity_minute_reader.rootdir, sid
)
else:
path = "{0}/{1}.bcolz".format(
self._equity_minute_reader.rootdir, sid)
return bcolz.open(path, mode='r')
def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
If there is a trade on the dt, the answer is dt provided.
"""
if data_frequency == 'minute':
return self._equity_minute_reader.get_last_traded_dt(asset, dt)
elif data_frequency == 'daily':
return self._equity_daily_reader.get_last_traded_dt(asset, dt)
@staticmethod
def _is_extra_source(asset, field, map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (field in BASE_FIELDS and isinstance(asset, Asset))
def _get_fetcher_value(self, asset, field, dt):
day = normalize_date(dt)
try:
return \
self._augmented_sources_map[field][asset].loc[day, field]
except KeyError:
return np.NaN
def get_spot_value(self, asset, field, dt, data_frequency):
"""
Public API method that returns a scalar value representing the value
        of the desired asset's field at the given dt.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
            The spot value of ``field`` for ``asset``. The return type is based
on the ``field`` requested. If the field is one of 'open', 'high',
'low', 'close', or 'price', the value will be a float. If the
            ``field`` is 'volume' the value will be an int. If the ``field`` is
'last_traded' the value will be a Timestamp.
"""
if self._is_extra_source(asset, field, self._augmented_sources_map):
return self._get_fetcher_value(asset, field, dt)
if field not in BASE_FIELDS:
raise KeyError("Invalid column: " + str(field))
if dt < asset.start_date or \
(data_frequency == "daily" and dt > asset.end_date) or \
(data_frequency == "minute" and
normalize_date(dt) > asset.end_date):
if field == "volume":
return 0
elif field != "last_traded":
return np.NaN
if data_frequency == "daily":
day_to_use = dt
day_to_use = normalize_date(day_to_use)
return self._get_daily_data(asset, field, day_to_use)
else:
if isinstance(asset, Future):
return self._get_minute_spot_value_future(
asset, field, dt)
else:
if field == "last_traded":
return self._equity_minute_reader.get_last_traded_dt(
asset, dt
)
elif field == "price":
return self._get_minute_spot_value(asset, "close", dt,
True)
else:
return self._get_minute_spot_value(asset, field, dt)
def get_adjustments(self, assets, field, dt, perspective_dt):
"""
Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
            The timestamp from which the data is being viewed back.
Returns
-------
adjustments : list[Adjustment]
The adjustments to that field.
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
split_adj_factor = lambda x: x if field != 'volume' else 1.0 / x
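        # e.g. a split ratio of 0.5 (a 2-for-1 split) multiplies price fields
        # by 0.5, while for volume the ratio is inverted to 1 / 0.5 == 2.0.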
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != 'volume':
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset
def get_adjusted_value(self, asset, field, dt,
perspective_dt,
data_frequency,
spot_value=None):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
            The timestamp from which the data is being viewed back.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
            the ``field`` is 'volume' the value will be an int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
# if this a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field,
self._augmented_sources_map):
spot_value = self.get_spot_value(asset, field, perspective_dt,
data_frequency)
else:
spot_value = self.get_spot_value(asset, field, dt,
data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value
def _get_minute_spot_value_future(self, asset, column, dt):
# Futures bcolz files have 1440 bars per day (24 hours), 7 days a week.
# The file attributes contain the "start_dt" and "last_dt" fields,
# which represent the time period for this bcolz file.
# The start_dt is midnight of the first day that this future started
# trading.
# figure out the # of minutes between dt and this asset's start_dt
start_date = self._get_asset_start_date(asset)
minute_offset = int((dt - start_date).total_seconds() / 60)
if minute_offset < 0:
# asking for a date that is before the asset's start date, no dice
return 0.0
# then just index into the bcolz carray at that offset
carray = self._open_minute_file(column, asset)
result = carray[minute_offset]
# if there's missing data, go backwards until we run out of file
while result == 0 and minute_offset > 0:
minute_offset -= 1
result = carray[minute_offset]
if column != 'volume':
# FIXME switch to a futures reader
return result * 0.001
else:
return result
def _get_minute_spot_value(self, asset, column, dt, ffill=False):
result = self._equity_minute_reader.get_value(
asset.sid, dt, column
)
if column == "volume":
if result == 0:
return 0
elif not ffill or not np.isnan(result):
# if we're not forward filling, or we found a result, return it
return result
# we are looking for price, and didn't find one. have to go hunting.
last_traded_dt = \
self._equity_minute_reader.get_last_traded_dt(asset, dt)
if last_traded_dt is pd.NaT:
# no last traded dt, bail
return np.nan
# get the value as of the last traded dt
result = self._equity_minute_reader.get_value(
asset.sid,
last_traded_dt,
column
)
if np.isnan(result):
return np.nan
if dt == last_traded_dt or dt.date() == last_traded_dt.date():
return result
# the value we found came from a different day, so we have to adjust
# the data if there are any adjustments on that day barrier
return self.get_adjusted_value(
asset, column, last_traded_dt,
dt, "minute", spot_value=result
)
def _get_daily_data(self, asset, column, dt):
if column == "last_traded":
last_traded_dt = \
self._equity_daily_reader.get_last_traded_dt(asset, dt)
if pd.isnull(last_traded_dt):
return pd.NaT
else:
return last_traded_dt
elif column in OHLCV_FIELDS:
# don't forward fill
try:
val = self._equity_daily_reader.spot_price(asset, dt, column)
if val == -1:
if column == "volume":
return 0
else:
return np.nan
else:
return val
except NoDataOnDate:
return np.nan
elif column == "price":
found_dt = dt
while True:
try:
value = self._equity_daily_reader.spot_price(
asset, found_dt, "close"
)
if value != -1:
if dt == found_dt:
return value
else:
# adjust if needed
return self.get_adjusted_value(
asset, column, found_dt, dt, "minute",
spot_value=value
)
else:
found_dt -= tradingcalendar.trading_day
except NoDataOnDate:
return np.nan
@remember_last
def _get_days_for_window(self, end_date, bar_count):
tds = self.env.trading_days
end_loc = self.env.trading_days.get_loc(end_date)
start_loc = end_loc - bar_count + 1
if start_loc < 0:
raise HistoryWindowStartsBeforeData(
first_trading_day=self.env.first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=tds[bar_count].date(),
)
return tds[start_loc:end_loc + 1]
def _get_history_daily_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of daily frequency for the given sids.
"""
days_for_window = self._get_days_for_window(end_dt.date(), bar_count)
if len(assets) == 0:
return pd.DataFrame(None,
index=days_for_window,
columns=None)
future_data = []
eq_assets = []
for asset in assets:
if isinstance(asset, Future):
future_data.append(self._get_history_daily_window_future(
asset, days_for_window, end_dt, field_to_use
))
else:
eq_assets.append(asset)
eq_data = self._get_history_daily_window_equities(
eq_assets, days_for_window, end_dt, field_to_use
)
if future_data:
            # TODO: This case appears to be uncovered by testing.
            # Note: this stacks equity columns first and future columns last,
            # which only matches ``assets`` if it is ordered the same way.
            data = np.concatenate([eq_data, np.array(future_data).T], axis=1)
else:
data = eq_data
return pd.DataFrame(
data,
index=days_for_window,
columns=assets
)
def _get_history_daily_window_future(self, asset, days_for_window,
end_dt, column):
# Since we don't have daily bcolz files for futures (yet), use minute
# bars to calculate the daily values.
data = []
data_groups = []
# get all the minutes for the days NOT including today
for day in days_for_window[:-1]:
minutes = self.env.market_minutes_for_day(day)
values_for_day = np.zeros(len(minutes), dtype=np.float64)
for idx, minute in enumerate(minutes):
minute_val = self._get_minute_spot_value_future(
asset, column, minute
)
values_for_day[idx] = minute_val
data_groups.append(values_for_day)
# get the minutes for today
last_day_minutes = pd.date_range(
start=self.env.get_open_and_close(end_dt)[0],
end=end_dt,
freq="T"
)
values_for_last_day = np.zeros(len(last_day_minutes), dtype=np.float64)
for idx, minute in enumerate(last_day_minutes):
minute_val = self._get_minute_spot_value_future(
asset, column, minute
)
values_for_last_day[idx] = minute_val
data_groups.append(values_for_last_day)
for group in data_groups:
if len(group) == 0:
continue
if column == 'volume':
data.append(np.sum(group))
elif column == 'open':
data.append(group[0])
elif column == 'close':
data.append(group[-1])
elif column == 'high':
data.append(np.amax(group))
elif column == 'low':
data.append(np.amin(group))
return data
def _get_history_daily_window_equities(
self, assets, days_for_window, end_dt, field_to_use):
ends_at_midnight = end_dt.hour == 0 and end_dt.minute == 0
if ends_at_midnight:
# two cases where we use daily data for the whole range:
# 1) the history window ends at midnight utc.
# 2) the last desired day of the window is after the
# last trading day, use daily data for the whole range.
return self._get_daily_window_for_sids(
assets,
field_to_use,
days_for_window,
extra_slot=False
)
else:
# minute mode, requesting '1d'
daily_data = self._get_daily_window_for_sids(
assets,
field_to_use,
days_for_window[0:-1]
)
if field_to_use == 'open':
minute_value = self._equity_daily_aggregator.opens(
assets, end_dt)
elif field_to_use == 'high':
minute_value = self._equity_daily_aggregator.highs(
assets, end_dt)
elif field_to_use == 'low':
minute_value = self._equity_daily_aggregator.lows(
assets, end_dt)
elif field_to_use == 'close':
minute_value = self._equity_daily_aggregator.closes(
assets, end_dt)
elif field_to_use == 'volume':
minute_value = self._equity_daily_aggregator.volumes(
assets, end_dt)
# append the partial day.
daily_data[-1] = minute_value
return daily_data
def _get_history_minute_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of minute frequency for the given sids.
"""
# get all the minutes for this window
mm = self.env.market_minutes
end_loc = mm.get_loc(end_dt)
start_loc = end_loc - bar_count + 1
if start_loc < 0:
suggested_start_day = (mm[bar_count] + self.env.trading_day).date()
raise HistoryWindowStartsBeforeData(
first_trading_day=self.env.first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=suggested_start_day,
)
minutes_for_window = mm[start_loc:end_loc + 1]
asset_minute_data = self._get_minute_window_for_assets(
assets,
field_to_use,
minutes_for_window,
)
return pd.DataFrame(
asset_minute_data,
index=minutes_for_window,
columns=assets
)
def get_history_window(self, assets, end_dt, bar_count, frequency, field,
ffill=True):
"""
Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
assets : list of zipline.data.Asset objects
            The assets whose data is desired.
        end_dt: pd.Timestamp
            The end (last bar, inclusive) of the desired window of data.
bar_count: int
The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data.
"""
if field not in OHLCVP_FIELDS:
raise ValueError("Invalid field: {0}".format(field))
if frequency == "1d":
if field == "price":
df = self._get_history_daily_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_daily_window(assets, end_dt, bar_count,
field)
elif frequency == "1m":
if field == "price":
df = self._get_history_minute_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_minute_window(assets, end_dt, bar_count,
field)
else:
raise ValueError("Invalid frequency: {0}".format(frequency))
# forward-fill price
if field == "price":
if frequency == "1m":
data_frequency = 'minute'
elif frequency == "1d":
data_frequency = 'daily'
else:
raise Exception(
"Only 1d and 1m are supported for forward-filling.")
dt_to_fill = df.index[0]
perspective_dt = df.index[-1]
assets_with_leading_nan = np.where(pd.isnull(df.iloc[0]))[0]
for missing_loc in assets_with_leading_nan:
asset = assets[missing_loc]
previous_dt = self.get_last_traded_dt(
asset, dt_to_fill, data_frequency)
if pd.isnull(previous_dt):
continue
previous_value = self.get_adjusted_value(
asset,
field,
previous_dt,
perspective_dt,
data_frequency,
)
df.iloc[0, missing_loc] = previous_value
df.fillna(method='ffill', inplace=True)
for asset in df.columns:
if df.index[-1] >= asset.end_date:
# if the window extends past the asset's end date, set
# all post-end-date values to NaN in that asset's series
series = df[asset]
series[series.index.normalize() > asset.end_date] = np.NaN
return df
def _get_minute_window_for_assets(self, assets, field, minutes_for_window):
"""
Internal method that gets a window of adjusted minute data for an asset
and specified date range. Used to support the history API method for
minute bars.
Missing bars are filled with NaN.
Parameters
----------
asset : Asset
The asset whose data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values.
"""
if isinstance(assets, Future):
return self._get_minute_window_for_future([assets], field,
minutes_for_window)
else:
# TODO: Make caller accept assets.
window = self._get_minute_window_for_equities(assets, field,
minutes_for_window)
return window
def _get_minute_window_for_future(self, asset, field, minutes_for_window):
# THIS IS TEMPORARY. For now, we are only exposing futures within
# equity trading hours (9:30 am to 4pm, Eastern). The easiest way to
# do this is to simply do a spot lookup for each desired minute.
return_data = np.zeros(len(minutes_for_window), dtype=np.float64)
for idx, minute in enumerate(minutes_for_window):
return_data[idx] = \
self._get_minute_spot_value_future(asset, field, minute)
# Note: an improvement could be to find the consecutive runs within
# minutes_for_window, and use them to read the underlying ctable
# more efficiently.
# Once futures are on 24-hour clock, then we can just grab all the
# requested minutes in one shot from the ctable.
# no adjustments for futures, yay.
return return_data
def _get_minute_window_for_equities(
self, assets, field, minutes_for_window):
return self._equity_minute_history_loader.history(assets,
minutes_for_window,
field)
def _apply_all_adjustments(self, data, asset, dts, field,
price_adj_factor=1.0):
"""
Internal method that applies all the necessary adjustments on the
given data array.
The adjustments are:
- splits
- if field != "volume":
- mergers
- dividends
- * 0.001
- any zero fields replaced with NaN
- all values rounded to 3 digits after the decimal point.
Parameters
----------
data : np.array
The data to be adjusted.
asset: Asset
The asset whose data is being adjusted.
dts: pd.DateTimeIndex
The list of minutes or days representing the desired window.
field: string
The field whose values are in the data array.
price_adj_factor: float
Factor with which to adjust OHLC values.
Returns
-------
None. The data array is modified in place.
"""
self._apply_adjustments_to_window(
self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
),
data,
dts,
field != 'volume'
)
if field != 'volume':
self._apply_adjustments_to_window(
self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
),
data,
dts,
True
)
self._apply_adjustments_to_window(
self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS"
),
data,
dts,
True
)
if price_adj_factor is not None:
data *= price_adj_factor
np.around(data, 3, out=data)
def _get_daily_window_for_sids(
self, assets, field, days_in_window, extra_slot=True):
"""
Internal method that gets a window of adjusted daily data for a sid
and specified date range. Used to support the history API method for
daily bars.
Parameters
----------
        assets : iterable of Asset
            The assets whose data is desired.
        days_in_window: pandas.DatetimeIndex
            The days making up the desired window of data.
field: string
The specific field to return. "open", "high", "close_price", etc.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan.
"""
bar_count = len(days_in_window)
# create an np.array of size bar_count
if extra_slot:
return_array = np.zeros((bar_count + 1, len(assets)))
else:
return_array = np.zeros((bar_count, len(assets)))
if field != "volume":
# volumes default to 0, so we don't need to put NaNs in the array
return_array[:] = np.NAN
if bar_count != 0:
data = self._equity_history_loader.history(assets,
days_in_window,
field)
if extra_slot:
return_array[:len(return_array) - 1, :] = data
else:
return_array[:len(data)] = data
return return_array
@staticmethod
def _apply_adjustments_to_window(adjustments_list, window_data,
dts_in_window, multiply):
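        # Worked example: with adjustments_list == [(T1, 0.5)] and
        # dts_in_window == [T0, T1, T2], only the bars strictly before T1
        # (i.e. window_data[0:1]) are scaled -- multiplied by 0.5 when
        # ``multiply`` is True, divided by 0.5 otherwise.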
if len(adjustments_list) == 0:
return
# advance idx to the correct spot in the adjustments list, based on
# when the window starts
idx = 0
while idx < len(adjustments_list) and dts_in_window[0] >\
adjustments_list[idx][0]:
idx += 1
# if we've advanced through all the adjustments, then there's nothing
# to do.
if idx == len(adjustments_list):
return
while idx < len(adjustments_list):
adjustment_to_apply = adjustments_list[idx]
if adjustment_to_apply[0] > dts_in_window[-1]:
break
range_end = dts_in_window.searchsorted(adjustment_to_apply[0])
if multiply:
window_data[0:range_end] *= adjustment_to_apply[1]
else:
window_data[0:range_end] /= adjustment_to_apply[1]
idx += 1
def _get_adjustment_list(self, asset, adjustments_dict, table_name):
"""
Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
A list of [multiplier, pd.Timestamp], earliest first
"""
if self._adjustment_reader is None:
return []
sid = int(asset)
try:
adjustments = adjustments_dict[sid]
except KeyError:
adjustments = adjustments_dict[sid] = self._adjustment_reader.\
get_adjustments_for_sid(table_name, sid)
return adjustments
def _check_is_currently_alive(self, asset, dt):
sid = int(asset)
if sid not in self._asset_start_dates:
self._get_asset_start_date(asset)
start_date = self._asset_start_dates[sid]
if self._asset_start_dates[sid] > dt:
raise NoTradeDataAvailableTooEarly(
sid=sid,
dt=normalize_date(dt),
start_dt=start_date
)
end_date = self._asset_end_dates[sid]
if self._asset_end_dates[sid] < dt:
raise NoTradeDataAvailableTooLate(
sid=sid,
dt=normalize_date(dt),
end_dt=end_date
)
def _get_asset_start_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_start_dates[asset]
def _get_asset_end_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_end_dates[asset]
def _ensure_asset_dates(self, asset):
sid = int(asset)
if sid not in self._asset_start_dates:
if self._first_trading_day is not None:
self._asset_start_dates[sid] = \
max(asset.start_date, self._first_trading_day)
else:
self._asset_start_dates[sid] = asset.start_date
self._asset_end_dates[sid] = asset.end_date
def get_splits(self, sids, dt):
"""
Returns any splits for the given sids and the given dt.
Parameters
----------
sids : container
Sids for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(int, float)]
List of splits, where each split is a (sid, ratio) tuple.
"""
if self._adjustment_reader is None or not sids:
return {}
# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)
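        # (e.g. midnight UTC on 2016-03-17 corresponds to 1458172800)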
splits = self._adjustment_reader.conn.execute(
"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
(seconds,)).fetchall()
splits = [split for split in splits if split[0] in sids]
return splits
def get_stock_dividends(self, sid, trading_days):
"""
Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
list: A list of objects with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps.
"""
if self._adjustment_reader is None:
return []
if len(trading_days) == 0:
return []
start_dt = trading_days[0].value / 1e9
end_dt = trading_days[-1].value / 1e9
dividends = self._adjustment_reader.conn.execute(
"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
"ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
fetchall()
dividend_info = []
for dividend_tuple in dividends:
dividend_info.append({
"declared_date": dividend_tuple[1],
"ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
"pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
"payment_sid": dividend_tuple[4],
"ratio": dividend_tuple[5],
"record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
"sid": dividend_tuple[7]
})
return dividend_info
def contains(self, asset, field):
return field in BASE_FIELDS or \
(field in self._augmented_sources_map and
asset in self._augmented_sources_map[field])
def get_fetcher_assets(self, dt):
"""
Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects.
"""
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]['sid']
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else []
@weak_lru_cache(20)
def _get_minute_count_for_transform(self, ending_minute, days_count):
# cache size picked somewhat loosely. this code exists purely to
# handle deprecated API.
# bars is the number of days desired. we have to translate that
# into the number of minutes we want.
# we get all the minutes for the last (bars - 1) days, then add
# all the minutes so far today. the +2 is to account for ignoring
# today, and the previous day, in doing the math.
previous_day = self.env.previous_trading_day(ending_minute)
days = self.env.days_in_range(
self.env.add_trading_days(-days_count + 2, previous_day),
previous_day,
)
minutes_count = \
sum(210 if day in self.env.early_closes else 390 for day in days)
# add the minutes for today
today_open = self.env.get_open_and_close(ending_minute)[0]
minutes_count += \
((ending_minute - today_open).total_seconds() // 60) + 1
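        # Worked example (regular 390-minute sessions, no early closes): with
        # days_count=3 the range above spans the two previous sessions
        # (2 * 390 = 780 minutes), and the minutes elapsed so far today are
        # then added on top.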
return minutes_count
def get_simple_transform(self, asset, transform_name, dt, data_frequency,
bars=None):
if transform_name == "returns":
# returns is always calculated over the last 2 days, regardless
# of the simulation's data frequency.
hst = self.get_history_window(
[asset], dt, 2, "1d", "price", ffill=True
)[asset]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
if bars is None:
raise ValueError("bars cannot be None!")
if data_frequency == "minute":
freq_str = "1m"
calculated_bar_count = self._get_minute_count_for_transform(
dt, bars
)
else:
freq_str = "1d"
calculated_bar_count = bars
price_arr = self.get_history_window(
[asset], dt, calculated_bar_count, freq_str, "price", ffill=True
)[asset]
if transform_name == "mavg":
return nanmean(price_arr)
elif transform_name == "stddev":
return nanstd(price_arr, ddof=1)
elif transform_name == "vwap":
volume_arr = self.get_history_window(
[asset], dt, calculated_bar_count, freq_str, "volume",
ffill=True
)[asset]
vol_sum = nansum(volume_arr)
try:
ret = nansum(price_arr * volume_arr) / vol_sum
except ZeroDivisionError:
ret = np.nan
return ret
| apache-2.0 |
schets/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 55 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
winklerand/pandas | pandas/tests/test_errors.py | 9 | 1147 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import pandas # noqa
import pandas as pd
@pytest.mark.parametrize(
"exc", ['UnsupportedFunctionCall', 'UnsortedIndexError',
'OutOfBoundsDatetime',
'ParserError', 'PerformanceWarning', 'DtypeWarning',
'EmptyDataError', 'ParserWarning', 'MergeError'])
def test_exception_importable(exc):
from pandas import errors
e = getattr(errors, exc)
assert e is not None
# check that we can raise on them
with pytest.raises(e):
raise e()
def test_catch_oob():
from pandas import errors
try:
pd.Timestamp('15000101')
except errors.OutOfBoundsDatetime:
pass
def test_error_rename():
# see gh-12665
from pandas.errors import ParserError
from pandas.io.common import CParserError
try:
raise CParserError()
except ParserError:
pass
try:
raise ParserError()
except CParserError:
pass
with catch_warnings(record=True):
try:
raise ParserError()
except pd.parser.CParserError:
pass
| bsd-3-clause |
samuel1208/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a ``predict``
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
| bsd-3-clause |
vigilv/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
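# Minimal usage sketch for load_files (the container path below is purely
# illustrative):
#
#     dataset = load_files('/path/to/container_folder', encoding='utf-8')
#     dataset.target_names   # one entry per category subfolder
#     len(dataset.data)      # one raw document per file (load_content=True)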
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
refstudycentre/versification | util.py | 1 | 11774 |
import numpy as np
import unicodecsv
import codecs
import goslate
import sqlite3
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def imp_load(filename):
texts = []
books = []
chapters = []
verses = []
# Read in a whole bible
with codecs.open(filename,encoding='utf-8') as f:
bibletext = f.read()
# Split by verse
bible_verses = bibletext.split('$$$')
# Process verses
for verse in bible_verses:
try:
verse = verse.split('\n',1)
ref = verse[0].strip()
text = verse[1].strip()
ref = ref.split('.')
book = ref[0].strip()
cnum = ref[1].strip()
vnum = ref[2].strip()
texts.append(text)
books.append(book)
chapters.append(cnum)
verses.append(vnum)
except IndexError:
pass
return books, chapters, verses, texts
def calculate_similarity(texts, translations):
# Train the tf-idf thingy on the translated texts
tfidf = TfidfVectorizer().fit_transform(texts)
# Build a matrix representation of the similarities between verses
    # This will yield a symmetric matrix
# TODO: For performance and logical reasons: Only calculate similarity for nearby verses, assume others 0 ?
M = np.array([linear_kernel(tfidf[j:j+1], tfidf).flatten() for j in range(len(texts))])
# Hack(ish): Set similarity with verses of same translation to 0
for i in range(len(M)):
for j in range(i+1):
if translations[i] == translations[j]:
M[i][j] = M[j][i] = 0
# print np.round(M*100,0)
return M
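# Note: TfidfVectorizer L2-normalizes rows by default, so linear_kernel on the
# tf-idf matrix equals cosine similarity and every entry of M lies in [0, 1]
# before same-translation pairs are zeroed out.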
def find_best_couple(M,t):
"""
find best couple in similarity matrix M
the translation(s) of each verse is given in t
"""
# assume values are 0 for verses in same translation
i_max, j_max = np.unravel_index(M.argmax(), M.shape)
P_max = M[i_max, j_max]
return i_max, j_max, P_max
def merge_nodes(M,a,b):
"""
merge indices a and b in similarity matrix M into one supernode,
averaging similarity values between the supernode and other verses
"""
N = len(M)
# calculate a new row (and column) for the supernode
supernode_similarity = [np.average([M[k][a],M[k][b]]) for k in range(N)]
# append the row (this will jumble the verse order...)
newM = np.append(M, np.array(supernode_similarity)[None,:], axis=0)
# append 0 (supernode's similarity with itself) to the row and add it as a column
supernode_similarity.append(0.)
newM = np.append(newM, np.array(supernode_similarity)[:,None], axis=1)
# to preserve verse indices, don't delete
# newM = np.delete(newM,[a,b],axis=0)
# rather make rows a and b 0
# to preserve verse indices, don't delete
# newM = np.delete(newM,[a,b],axis=1)
# rather make columns a and b 0
newM[:,a] = np.zeros_like(newM[:,a])
newM[:,b] = np.zeros_like(newM[:,b])
newM[a,:] = np.zeros_like(newM[a,:])
newM[b,:] = np.zeros_like(newM[b,:])
return newM
def group_verses(M, t, numT, P_min = 0.1):
"""
Automatically group verses
t = the translation of each verse
numT = max number of verses in a group = number of translations
"""
t = [[val] for val in t]
N = len(M)
groups = {} # keyed by supernode index
iteration = 0
max_iteration = N
while iteration < max_iteration:
iteration += 1
#print "\t\tGrouping: iteration ",iteration
i,j,P = find_best_couple(M, t)
#print "\t\tbest couple: ",i,j,P
# Stop iterating if similarity gets too low...
if P < P_min:
            break
group = []
# merge supernodes if they exist, else merge nodes:
if i in groups:
group.extend(groups[i])
else:
group.append(i)
if j in groups:
group.extend(groups[j])
else:
group.append(j)
# group now contains all of the verses for the new supernode
if len(group) > numT:
# this grouping is invalid
# prevent it from happening again by making P 0
M[i][j] = 0
else:
# valid grouping. save it.
# Remove the previous supernode groups
if i in groups:
del groups[i]
if j in groups:
del groups[j]
# Create the supernode
M = merge_nodes(M,i,j)
t.append(t[i] + t[j])
# Save the index of the new supernode
supernode_index = len(M)-1
groups[supernode_index] = group
print "\r\t\t",len(groups),
print
return groups
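# Rough usage sketch (mirroring what ``align`` below does): given English texts and
# a parallel list of translation labels,
#
#     M = calculate_similarity(texts_en, translations)
#     groups = group_verses(M, translations, numT=number_of_translations)
#
# each value of the returned dict is a list of verse indices grouped together.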
def align(input_translations, input_filenames, output_filename):
"""
Load one csv file for each translation
Group, align and sort the verses
Export a csv file containing a column for each translation
"""
if len(input_translations) != len(input_filenames):
raise ValueError("Number of translations and number of files must be the same")
M = len(input_translations)
# Load pre-translated data
print "\tLoading data from files..."
#translations,books,chapters,verses,texts_original,texts_en = load_translated_verses(input_translations, input_filenames)
translations,chapters,verses,texts_original,texts_en = csv_import_translated_books(input_filenames, input_translations)
# Calculate similarity between verses
print "\tCalculating similarity matrix..."
similarity = calculate_similarity(texts_en, translations)
def canonical_group_cmp(a, b):
"""
Define sort order for groups of verses
"""
# find two verses from the same translation to compare their canonical order
for i in a:
for j in b:
if translations[i] == translations[j]:
return i - j
# Group the verses
print "\tGrouping verses..."
groups = group_verses(similarity, translations, 3).values()
# print groups
# Put groups back into canonical order
print "\tSorting verses..."
groups.sort(canonical_group_cmp)
# prepare data for csv export
print "\tPreparing csv data..."
csv_rows = []
csv_rows.append(input_translations) # headers
for group in groups:
# create a row in the csv file for every group
if len(group) == M:
# rows where all translations are present, are quick:
group.sort()
row = [u"{0}:{1}:{2}".format(chapters[verse],verses[verse],texts_original[verse]) for verse in group]
else:
# for other rows, we have to find the missing translation, and substitute it with a blank
row = []
for translation in input_translations:
found = False
for verse in group:
if translation == translations[verse]:
# verse found for this translation
row.append(u"{0}:{1}:{2}".format(chapters[verse],verses[verse],texts_original[verse]))
found = True
break
if not found:
# fill in a blank
row.append("")
csv_rows.append(row)
# print csv_rows
# Export to csv file
print "\tWriting csv file..."
with open(output_filename,'wb') as f:
cw = unicodecsv.writer(f, encoding='utf-8')
cw.writerows(csv_rows)
print "\tDone!"
def translate_csv(in_filename, language, out_filename):
"""
Load a bible book from csv file
translate it
save it as a new file
"""
# Create a translator object
gs = goslate.Goslate(retry_times=100, timeout=100)
# Load the bible book to be translated
chapters,verses,texts_original = csv_import_book(in_filename)
# Batch translate the verses if necessary
if language != 'en':
print "Batch translating {0} verses from '{1}' to 'en'".format(len(texts_original), language)
texts_translated = gs.translate(texts_original, 'en', language)
else:
print "Not translating {0} verses already in 'en'".format(len(texts_original))
texts_translated = texts_original
# Write to CSV file
rows = zip(chapters, verses, texts_original, texts_translated)
with open(out_filename,'wb') as f:
cw = unicodecsv.writer(f, encoding='utf-8')
cw.writerow(['chapter','verse','text_original','text_english'])
cw.writerows(rows)
def csv_import_book(filename):
"""
load bible book from csv file
"""
texts = []
chapters = []
verses = []
# Read in a whole file of verses
with open(filename,'rb') as f:
cr = unicodecsv.reader(f, encoding='utf-8')
header = cr.next() # skip header
# Process verses
for cnum,vnum,text in cr:
chapters.append(int(cnum)) # parse integer
verses.append(int(vnum)) # parse integer
texts.append(text.strip()) # remove surrounding whitespace
# return results
return chapters,verses,texts
def csv_export_book(filename, rows=[], chapters=[], verses=[], texts=[]):
if not len(rows) > 0:
rows = zip(chapters, verses, texts)
with open(filename,'wb') as f:
cw = unicodecsv.writer(f,encoding='utf-8')
cw.writerow(['chapter','verse','text'])
cw.writerows(rows)
def csv_import_translated_book(input_file):
"""
import a single translated book from a single translation from single csv file
"""
texts_en = []
texts_original = []
chapters = []
verses = []
# Read in a whole (Google translated) file of verses
with open(input_file, 'rb') as f:
cr = unicodecsv.reader(f, encoding='utf-8')
header = cr.next() # skip header
# Process verses
for cnum,vnum,text_original,text_en in cr:
chapters.append(int(cnum))
verses.append(int(vnum))
texts_original.append(text_original.strip())
texts_en.append(text_en.strip())
# return results
return chapters,verses,texts_original,texts_en
def csv_import_translated_books(input_files, input_translations):
"""
import a single book from M translations from M csv files
"""
if len(input_files) != len(input_translations):
raise ValueError("Number of input files and translations are not the same")
translations = []
chapters = []
verses = []
texts_original = []
texts_en = []
for in_file,translation in zip(input_files,input_translations):
c,v,o,e = csv_import_translated_book(in_file)
chapters.extend(c)
verses.extend(v)
texts_original.extend(o)
texts_en.extend(e)
translations.extend([translation]*len(e))
return translations,chapters,verses,texts_original,texts_en
def csv_import_aligned_book(input_file):
"""
Import a single aligned book (e.g. after it is checked by humans)
"""
groups = []
with open(input_file, 'rb') as f:
cr = unicodecsv.reader(f, encoding='utf-8')
translations = cr.next() # header contains translation names
for row in cr:
group = {}
for i in range(len(translations)):
verse = row[i].split(':',3)
group[translations[i]] = {
'chapternum':int(verse[0]),
'versenum':int(verse[1]),
'text':verse[2].strip()
}
groups.append(group)
    return groups
| gpl-2.0 |
oesteban/mriqc | mriqc/qc/anatomical.py | 1 | 21553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# pylint: disable=no-member
r"""
Measures based on noise measurements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_cjv:
- :py:func:`~mriqc.qc.anatomical.cjv` -- **coefficient of joint variation**
(:abbr:`CJV (coefficient of joint variation)`):
The ``cjv`` of GM and WM was proposed as objective function by [Ganzetti2016]_ for
the optimization of :abbr:`INU (intensity non-uniformity)` correction algorithms.
Higher values are related to the presence of heavy head motion and large
:abbr:`INU (intensity non-uniformity)` artifacts. Lower values are better.
.. _iqms_cnr:
- :py:func:`~mriqc.qc.anatomical.cnr` -- **contrast-to-noise ratio**
(:abbr:`CNR (contrast-to-noise ratio)`): The ``cnr`` [Magnota2006]_,
is an extension of the :abbr:`SNR (signal-to-noise Ratio)` calculation
to evaluate how separated the tissue distributions of GM and WM are.
Higher values indicate better quality.
.. _iqms_snr:
- :py:func:`~mriqc.qc.anatomical.snr` -- **signal-to-noise ratio**
(:abbr:`SNR (signal-to-noise ratio)`): calculated within the
tissue mask.
.. _iqms_snrd:
- :py:func:`~mriqc.qc.anatomical.snr_dietrich`: **Dietrich's SNR**
(:abbr:`SNRd (signal-to-noise ratio, Dietrich 2007)`) as proposed
by [Dietrich2007]_, using the air background as reference.
.. _iqms_qi2:
- :py:func:`~mriqc.qc.anatomical.art_qi2`: **Mortamet's quality index 2**
(:abbr:`QI2 (quality index 2)`) is a calculation of the goodness-of-fit
of a :math:`\chi^2` distribution on the air mask,
once the artifactual intensities detected for computing
the :abbr:`QI1 (quality index 1)` index have been removed [Mortamet2009]_.
Measures based on information theory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_efc:
- :py:func:`~mriqc.qc.anatomical.efc`:
The :abbr:`EFC (Entropy Focus Criterion)`
[Atkinson1997]_ uses the Shannon entropy of voxel intensities as
an indication of ghosting and blurring induced by head motion.
Lower values are better.
The original equation is normalized by the maximum entropy, so that the
:abbr:`EFC (Entropy Focus Criterion)` can be compared across images with
different dimensions.
.. _iqms_fber:
- :py:func:`~mriqc.qc.anatomical.fber`:
The :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_,
defined as the mean energy of image values within the head relative
to outside the head [QAP-measures]_.
Higher values are better.
Measures targeting specific artifacts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_inu:
- **inu_\*** (*nipype interface to N4ITK*): summary statistics (max, min and median)
of the :abbr:`INU (intensity non-uniformity)` field as extracted by the N4ITK algorithm
[Tustison2010]_. Values closer to 1.0 are better.
.. _iqms_qi:
- :py:func:`~mriqc.qc.anatomical.art_qi1`:
Detect artifacts in the image using the method described in [Mortamet2009]_.
The :abbr:`QI1 (quality index 1)` is the proportion of voxels with intensity
corrupted by artifacts normalized by the number of voxels in the background.
Lower values are better.
.. figure:: ../resources/mortamet-mrm2009.png
The workflow to compute the artifact detection from [Mortamet2009]_.
.. _iqms_wm2max:
- :py:func:`~mriqc.qc.anatomical.wm2max`:
The white-matter to maximum intensity ratio is the median intensity
within the WM mask over the 95% percentile of the full intensity
distribution, that captures the existence of long tails due to
hyper-intensity of the carotid vessels and fat. Values
should be around the interval [0.6, 0.8].
Other measures
^^^^^^^^^^^^^^
.. _iqms_fwhm:
- **fwhm** (*nipype interface to AFNI*): The :abbr:`FWHM (full-width half maximum)` of
the spatial distribution of the image intensity values in units of voxels [Forman1995]_.
Lower values are better. Uses the gaussian width estimator filter implemented in
AFNI's ``3dFWHMx``:
.. math ::
      \text{FWHM} = \sqrt{-\left[4 \ln{\left(1-\frac{\sigma^2_{X^m_{i+1,j}-X^m_{i,j}}}
      {2\sigma^2_{X^m_{i,j}}}\right)}\right]^{-1}}
.. _iqms_icvs:
- :py:func:`~mriqc.qc.anatomical.volume_fraction` (**icvs_\***):
the
:abbr:`ICV (intracranial volume)` fractions of :abbr:`CSF (cerebrospinal fluid)`,
:abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. They should move within
a normative range.
.. _iqms_rpve:
- :py:func:`~mriqc.qc.anatomical.rpve` (**rpve_\***): the
:abbr:`rPVe (residual partial voluming error)` of :abbr:`CSF (cerebrospinal fluid)`,
:abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. Lower values are better.
.. _iqms_summary:
- :py:func:`~mriqc.qc.anatomical.summary_stats` (**summary_\*_\***):
Mean, standard deviation, 5% percentile and 95% percentile of the distribution
of background, :abbr:`CSF (cerebrospinal fluid)`, :abbr:`GM (gray-matter)` and
:abbr:`WM (white-matter)`.
.. _iqms_tpm:
- **overlap_\*_\***:
The overlap of the :abbr:`TPMs (tissue probability maps)` estimated from the image and
the corresponding maps from the ICBM nonlinear-asymmetric 2009c template.
.. math ::
\text{JI}^k = \frac{\sum_i \min{(\text{TPM}^k_i, \text{MNI}^k_i)}}
{\sum_i \max{(\text{TPM}^k_i, \text{MNI}^k_i)}}
.. topic:: References
.. [Dietrich2007] Dietrich et al., *Measurement of SNRs in MR images: influence
of multichannel coils, parallel imaging and reconstruction filters*, JMRI 26(2):375--385.
2007. doi:`10.1002/jmri.20969 <http://dx.doi.org/10.1002/jmri.20969>`_.
.. [Ganzetti2016] Ganzetti et al., *Intensity inhomogeneity correction of structural MR images:
a data-driven approach to define input algorithm parameters*. Front Neuroinform 10:10. 2016.
doi:`10.3389/finf.201600010 <http://dx.doi.org/10.3389/finf.201600010>`_.
.. [Magnota2006] Magnotta, VA., & Friedman, L., *Measurement of signal-to-noise
and contrast-to-noise in the fBIRN multicenter imaging study*.
J Dig Imag 19(2):140-147, 2006. doi:`10.1007/s10278-006-0264-x
<http://dx.doi.org/10.1007/s10278-006-0264-x>`_.
.. [Mortamet2009] Mortamet B et al., *Automatic quality assessment in
structural brain magnetic resonance imaging*, Mag Res Med 62(2):365-372,
2009. doi:`10.1002/mrm.21992 <http://dx.doi.org/10.1002/mrm.21992>`_.
.. [Tustison2010] Tustison NJ et al., *N4ITK: improved N3 bias correction*,
IEEE Trans Med Imag, 29(6):1310-20,
2010. doi:`10.1109/TMI.2010.2046908 <http://dx.doi.org/10.1109/TMI.2010.2046908>`_.
.. [Shehzad2015] Shehzad Z et al., *The Preprocessed Connectomes Project
Quality Assessment Protocol - a resource for measuring the quality of MRI data*,
Front. Neurosci. Conference Abstract: Neuroinformatics 2015.
doi:`10.3389/conf.fnins.2015.91.00047 <https://doi.org/10.3389/conf.fnins.2015.91.00047>`_.
.. [Forman1995] Forman SD et al., *Improved assessment of significant activation in functional
magnetic resonance imaging (fMRI): use of a cluster-size threshold*,
Magn. Reson. Med. 33 (5), 636–647, 1995.
doi:`10.1002/mrm.1910330508 <https://doi.org/10.1002/mrm.1910330508>`_.
mriqc.qc.anatomical module
^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import os.path as op
from sys import version_info
from math import pi, sqrt
import numpy as np
import scipy.ndimage as nd
from scipy.stats import kurtosis # pylint: disable=E0611
from io import open # pylint: disable=W0622
from builtins import zip, range # pylint: disable=W0622
from six import string_types
DIETRICH_FACTOR = 1.0 / sqrt(2 / (4 - pi))
FSL_FAST_LABELS = {'csf': 1, 'gm': 2, 'wm': 3, 'bg': 0}
PY3 = version_info[0] > 2
def snr(mu_fg, sigma_fg, n):
r"""
Calculate the :abbr:`SNR (Signal-to-Noise Ratio)`.
The estimation may be provided with only one foreground region in
which the noise is computed as follows:
.. math::
\text{SNR} = \frac{\mu_F}{\sigma_F\sqrt{n/(n-1)}},
where :math:`\mu_F` is the mean intensity of the foreground and
:math:`\sigma_F` is the standard deviation of the same region.
:param float mu_fg: mean of foreground.
:param float sigma_fg: standard deviation of foreground.
:param int n: number of voxels in foreground mask.
:return: the computed SNR
"""
return float(mu_fg / (sigma_fg * sqrt(n / (n - 1))))
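# Worked example with made-up values: mu_fg=1000, sigma_fg=50, n=100 gives
# 1000 / (50 * sqrt(100 / 99)) ~= 19.9.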
def snr_dietrich(mu_fg, sigma_air):
r"""
Calculate the :abbr:`SNR (Signal-to-Noise Ratio)`.
    The noise reference must be an air mask around the head that does not contain artifacts.
The computation is done following the eq. A.12 of [Dietrich2007]_, which
includes a correction factor in the estimation of the standard deviation of
air and its Rayleigh distribution:
.. math::
\text{SNR} = \frac{\mu_F}{\sqrt{\frac{2}{4-\pi}}\,\sigma_\text{air}}.
:param float mu_fg: mean of foreground.
:param float sigma_air: standard deviation of the air surrounding the head ("hat" mask).
:return: the computed SNR for the foreground segmentation
"""
if sigma_air < 1.0:
from .. import MRIQC_LOG
MRIQC_LOG.warning('SNRd - background sigma is too small (%f)', sigma_air)
sigma_air += 1.0
return float(DIETRICH_FACTOR * mu_fg / sigma_air)
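# Worked example with made-up values: mu_fg=1000, sigma_air=50 gives
# DIETRICH_FACTOR * 1000 / 50 ~= 0.655 * 20 ~= 13.1.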
def cnr(mu_wm, mu_gm, sigma_air):
r"""
Calculate the :abbr:`CNR (Contrast-to-Noise Ratio)` [Magnota2006]_.
Higher values are better.
.. math::
\text{CNR} = \frac{|\mu_\text{GM} - \mu_\text{WM} |}{\sqrt{\sigma_B^2 +
\sigma_\text{WM}^2 + \sigma_\text{GM}^2}},
where :math:`\sigma_B` is the standard deviation of the noise distribution within
the air (background) mask.
:param float mu_wm: mean of signal within white-matter mask.
:param float mu_gm: mean of signal within gray-matter mask.
:param float sigma_air: standard deviation of the air surrounding the head ("hat" mask).
:return: the computed CNR
"""
return float(abs(mu_wm - mu_gm) / sigma_air)
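# Worked example with made-up values: mu_wm=1000, mu_gm=600, sigma_air=50 gives
# |1000 - 600| / 50 = 8.0.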
def cjv(mu_wm, mu_gm, sigma_wm, sigma_gm):
r"""
Calculate the :abbr:`CJV (coefficient of joint variation)`, a measure
related to :abbr:`SNR (Signal-to-Noise Ratio)` and
:abbr:`CNR (Contrast-to-Noise Ratio)` that is presented as a proxy for
the :abbr:`INU (intensity non-uniformity)` artifact [Ganzetti2016]_.
Lower is better.
.. math::
\text{CJV} = \frac{\sigma_\text{WM} + \sigma_\text{GM}}{|\mu_\text{WM} - \mu_\text{GM}|}.
:param float mu_wm: mean of signal within white-matter mask.
:param float mu_gm: mean of signal within gray-matter mask.
:param float sigma_wm: standard deviation of signal within white-matter mask.
:param float sigma_gm: standard deviation of signal within gray-matter mask.
:return: the computed CJV
"""
return float((sigma_wm + sigma_gm) / abs(mu_wm - mu_gm))
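# Illustrative sketch (assumed tissue statistics): WM mean 900, GM mean 600, both
# with a standard deviation of 60, give
#
#     cjv(900.0, 600.0, 60.0, 60.0)   # (60 + 60) / |900 - 600| = 0.4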
def fber(img, headmask, rotmask=None):
r"""
    Calculate the :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_,
    defined as the energy of image values within the head relative to outside
    the head (computed here as the median of the squared intensities in each
    region). Higher values are better.
.. math::
\text{FBER} = \frac{E[|F|^2]}{E[|B|^2]}
:param numpy.ndarray img: input data
:param numpy.ndarray headmask: a mask of the head (including skull, skin, etc.)
:param numpy.ndarray rotmask: a mask of empty voxels inserted after a rotation of
data
"""
fg_mu = np.median(np.abs(img[headmask > 0]) ** 2)
airmask = np.ones_like(headmask, dtype=np.uint8)
airmask[headmask > 0] = 0
if rotmask is not None:
airmask[rotmask > 0] = 0
bg_mu = np.median(np.abs(img[airmask == 1]) ** 2)
if bg_mu < 1.0e-3:
return 0
return float(fg_mu / bg_mu)
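# Quick sanity check with synthetic arrays (illustration only): a head region with
# intensities around 10 over a background around 1 yields an FBER of roughly
# 10**2 / 1**2 = 100, since both terms are medians of squared values:
#
#     import numpy as np
#     img = np.concatenate([np.full(100, 10.0), np.full(100, 1.0)])
#     headmask = np.concatenate([np.ones(100, np.uint8), np.zeros(100, np.uint8)])
#     fber(img, headmask)   # 100.0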
def efc(img, framemask=None):
r"""
Calculate the :abbr:`EFC (Entropy Focus Criterion)` [Atkinson1997]_.
    Uses the Shannon entropy of voxel intensities as an indication of ghosting
    and blurring induced by head motion. Lower values are better, with EFC = 0
    when all the energy is concentrated in a single voxel.
.. math::
\text{E} = - \sum_{j=1}^N \frac{x_j}{x_\text{max}}
\ln \left[\frac{x_j}{x_\text{max}}\right]
with :math:`x_\text{max} = \sqrt{\sum_{j=1}^N x^2_j}`.
The original equation is normalized by the maximum entropy, so that the
:abbr:`EFC (Entropy Focus Criterion)` can be compared across images with
different dimensions:
.. math::
\text{EFC} = \left( \frac{N}{\sqrt{N}} \, \log{\sqrt{N}^{-1}} \right) \text{E}
:param numpy.ndarray img: input data
:param numpy.ndarray framemask: a mask of empty voxels inserted after a rotation of
data
"""
if framemask is None:
framemask = np.zeros_like(img, dtype=np.uint8)
n_vox = np.sum(1 - framemask)
# Calculate the maximum value of the EFC (which occurs any time all
# voxels have the same value)
efc_max = 1.0 * n_vox * (1.0 / np.sqrt(n_vox)) * \
np.log(1.0 / np.sqrt(n_vox))
# Calculate the total image energy
b_max = np.sqrt((img[framemask == 0]**2).sum())
# Calculate EFC (add 1e-16 to the image data to keep log happy)
return float((1.0 / efc_max) * np.sum((img[framemask == 0] / b_max) * np.log(
(img[framemask == 0] + 1e-16) / b_max)))
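# Two limiting cases that follow from the formula above: an image with all of its
# energy in a single voxel gives EFC ~= 0, while a constant non-zero image gives
# EFC ~= 1 because its entropy equals the efc_max normalizer, e.g.
#
#     import numpy as np
#     efc(np.full((4, 4, 4), 5.0))   # ~1.0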
def wm2max(img, mu_wm):
r"""
Calculate the :abbr:`WM2MAX (white-matter-to-max ratio)`,
defined as the maximum intensity found in the volume w.r.t. the
mean value of the white matter tissue. Values close to 1.0 are
better:
.. math ::
\text{WM2MAX} = \frac{\mu_\text{WM}}{P_{99.95}(X)}
"""
return float(mu_wm / np.percentile(img.reshape(-1), 99.95))
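# Illustrative example (assumed values): a white-matter mean of 450 against a
# 99.95th-percentile intensity of 500 gives WM2MAX = 0.9, comfortably inside the
# "close to 1.0" range described above.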
def art_qi1(airmask, artmask):
r"""
Detect artifacts in the image using the method described in [Mortamet2009]_.
    Calculates :math:`\text{QI}_1` as the proportion of voxels with intensity
    corrupted by artifacts, normalized by the total number of voxels in the background:
.. math ::
\text{QI}_1 = \frac{1}{N} \sum\limits_{x\in X_\text{art}} 1
Lower values are better.
:param numpy.ndarray airmask: input air mask, without artifacts
:param numpy.ndarray artmask: input artifacts mask
"""
# Count the number of voxels that remain after the opening operation.
# These are artifacts.
return float(artmask.sum() / (airmask.sum() + artmask.sum()))
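# Numeric illustration (hypothetical masks): 5 artifact voxels next to a 95-voxel
# clean air mask give QI1 = 5 / (95 + 5):
#
#     import numpy as np
#     art_qi1(np.ones(95), np.ones(5))   # 0.05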
def art_qi2(img, airmask, min_voxels=int(1e3), max_voxels=int(3e5), save_plot=True):
r"""
Calculates :math:`\text{QI}_2`, based on the goodness-of-fit of a centered
:math:`\chi^2` distribution onto the intensity distribution of
non-artifactual background (within the "hat" mask):
.. math ::
\chi^2_n = \frac{2}{(\sigma \sqrt{2})^{2n} \, (n - 1)!}x^{2n - 1}\, e^{-\frac{x}{2}}
where :math:`n` is the number of coil elements.
:param numpy.ndarray img: input data
:param numpy.ndarray airmask: input air mask without artifacts
"""
from sklearn.neighbors import KernelDensity
from scipy.stats import chi2
from mriqc.viz.misc import plot_qi2
# S. Ogawa was born
np.random.seed(1191935)
data = img[airmask > 0]
data = data[data > 0]
# Write out figure of the fitting
out_file = op.abspath('error.svg')
with open(out_file, 'w') as ofh:
ofh.write('<p>Background noise fitting could not be plotted.</p>')
if len(data) < min_voxels:
return 0.0, out_file
modelx = data if len(data) < max_voxels else np.random.choice(
data, size=max_voxels)
x_grid = np.linspace(0.0, np.percentile(data, 99), 1000)
# Estimate data pdf with KDE on a random subsample
kde_skl = KernelDensity(bandwidth=0.05 * np.percentile(data, 98),
kernel='gaussian').fit(modelx[:, np.newaxis])
kde = np.exp(kde_skl.score_samples(x_grid[:, np.newaxis]))
# Find cutoff
kdethi = np.argmax(kde[::-1] > kde.max() * 0.5)
# Fit X^2
param = chi2.fit(modelx[modelx < np.percentile(data, 95)], 32)
chi_pdf = chi2.pdf(x_grid, *param[:-2], loc=param[-2], scale=param[-1])
# Compute goodness-of-fit (gof)
gof = float(np.abs(kde[-kdethi:] - chi_pdf[-kdethi:]).mean())
if save_plot:
out_file = plot_qi2(x_grid, kde, chi_pdf, modelx, kdethi)
return gof, out_file
def volume_fraction(pvms):
r"""
    Computes the :abbr:`ICV (intracranial volume)` fractions
    corresponding to the partial volume maps.
.. math ::
\text{ICV}^k = \frac{\sum_i p^k_i}{\sum\limits_{x \in X_\text{brain}} 1}
:param list pvms: list of :code:`numpy.ndarray` of partial volume maps.
"""
tissue_vfs = {}
total = 0
for k, lid in list(FSL_FAST_LABELS.items()):
if lid == 0:
continue
tissue_vfs[k] = pvms[lid - 1].sum()
total += tissue_vfs[k]
for k in list(tissue_vfs.keys()):
tissue_vfs[k] /= total
return {k: float(v) for k, v in list(tissue_vfs.items())}
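# Illustrative sketch with toy partial volume maps (the arrays are invented; the
# CSF/GM/WM ordering follows FSL_FAST_LABELS above):
#
#     import numpy as np
#     pvms = [np.full(10, 0.2), np.full(10, 0.5), np.full(10, 0.3)]
#     volume_fraction(pvms)   # {'csf': 0.2, 'gm': 0.5, 'wm': 0.3}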
def rpve(pvms, seg):
"""
Computes the :abbr:`rPVe (residual partial voluming error)`
of each tissue class.
.. math ::
\\text{rPVE}^k = \\frac{1}{N} \\left[ \\sum\\limits_{p^k_i \
\\in [0.5, P_{98}]} p^k_i + \\sum\\limits_{p^k_i \\in [P_{2}, 0.5)} 1 - p^k_i \\right]
"""
pvfs = {}
for k, lid in list(FSL_FAST_LABELS.items()):
if lid == 0:
continue
pvmap = pvms[lid - 1]
pvmap[pvmap < 0.] = 0.
pvmap[pvmap >= 1.] = 1.
totalvol = np.sum(pvmap > 0.0)
upth = np.percentile(pvmap[pvmap > 0], 98)
loth = np.percentile(pvmap[pvmap > 0], 2)
pvmap[pvmap < loth] = 0
pvmap[pvmap > upth] = 0
pvfs[k] = (pvmap[pvmap > 0.5].sum() + (1.0 - pvmap[pvmap <= 0.5]).sum()) / totalvol
return {k: float(v) for k, v in list(pvfs.items())}
def summary_stats(img, pvms, airmask=None, erode=True):
r"""
    Estimates the mean, the standard deviation, and the 95th and 5th
    percentiles of each tissue distribution.
.. warning ::
Sometimes (with datasets that have been partially processed), the air
mask will be empty. In those cases, the background stats will be zero
for the mean, median, percentiles and kurtosis, the sum of voxels in
the other remaining labels for ``n``, and finally the MAD and the
:math:`\sigma` will be calculated as:
.. math ::
\sigma_\text{BG} = \sqrt{\sum \sigma_\text{i}^2}
"""
from .. import MRIQC_LOG
from statsmodels.robust.scale import mad
# Check type of input masks
dims = np.squeeze(np.array(pvms)).ndim
if dims == 4:
# If pvms is from FSL FAST, create the bg mask
stats_pvms = [np.zeros_like(img)] + pvms
elif dims == 3:
stats_pvms = [np.ones_like(pvms) - pvms, pvms]
else:
raise RuntimeError('Incorrect image dimensions ({0:d})'.format(
np.array(pvms).ndim))
if airmask is not None:
stats_pvms[0] = airmask
labels = list(FSL_FAST_LABELS.items())
if len(stats_pvms) == 2:
labels = list(zip(['bg', 'fg'], list(range(2))))
output = {}
for k, lid in labels:
mask = np.zeros_like(img, dtype=np.uint8)
mask[stats_pvms[lid] > 0.85] = 1
if erode:
struc = nd.generate_binary_structure(3, 2)
mask = nd.binary_erosion(
mask, structure=struc).astype(np.uint8)
nvox = float(mask.sum())
if nvox < 1e3:
MRIQC_LOG.warning('calculating summary stats of label "%s" in a very small '
'mask (%d voxels)', k, int(nvox))
if k == 'bg':
continue
output[k] = {
'mean': float(img[mask == 1].mean()),
'stdv': float(img[mask == 1].std()),
'median': float(np.median(img[mask == 1])),
'mad': float(mad(img[mask == 1])),
'p95': float(np.percentile(img[mask == 1], 95)),
'p05': float(np.percentile(img[mask == 1], 5)),
'k': float(kurtosis(img[mask == 1])),
'n': nvox,
}
if 'bg' not in output:
output['bg'] = {
'mean': 0.,
'median': 0.,
'p95': 0.,
'p05': 0.,
'k': 0.,
'stdv': sqrt(sum(val['stdv']**2
for _, val in list(output.items()))),
'mad': sqrt(sum(val['mad']**2
for _, val in list(output.items()))),
'n': sum(val['n'] for _, val in list(output.items()))
}
if 'bg' in output and output['bg']['mad'] == 0.0 and output['bg']['stdv'] > 1.0:
MRIQC_LOG.warning('estimated MAD in the background was too small ('
'MAD=%f)', output['bg']['mad'])
output['bg']['mad'] = output['bg']['stdv'] / DIETRICH_FACTOR
return output
def _prepare_mask(mask, label, erode=True):
fgmask = mask.copy()
if np.issubdtype(fgmask.dtype, np.integer):
if isinstance(label, string_types):
label = FSL_FAST_LABELS[label]
fgmask[fgmask != label] = 0
fgmask[fgmask == label] = 1
else:
fgmask[fgmask > .95] = 1.
fgmask[fgmask < 1.] = 0
if erode:
# Create a structural element to be used in an opening operation.
struc = nd.generate_binary_structure(3, 2)
        # Perform an opening operation on the foreground mask to remove small speckles.
fgmask = nd.binary_opening(fgmask, structure=struc).astype(np.uint8)
return fgmask
| bsd-3-clause |
analogdevicesinc/gnuradio | gr-analog/examples/fmtest.py | 40 | 7941 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import channels
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
fmtx = analog.nbfm_tx(audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = analog.sig_source_c(if_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
lo_freq, # frequency
1.0, # amplitude
0) # DC Offset
mixer = blocks.multiply_cc()
self.connect(self, fmtx, (mixer, 0))
self.connect(lo, (mixer, 1))
self.connect(mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = blocks.add_cc()
for n in xrange(self._N):
sig = analog.sig_source_f(self._audio_rate, analog.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = blocks.vector_sink_c()
self.channel = channels.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
        # Design the channelizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = filter.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
        # Create an NBFM receiver, squelch block, and vector sink for each of the
        # M output channels of the filter and connect them
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(analog.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(analog.standard_squelch(self._audio_rate*10))
self.snks.append(blocks.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
ghchinoy/tensorflow | tensorflow/contrib/timeseries/examples/known_anomaly.py | 24 | 7880 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
  # example, namely train_and_evaluate_exogenous.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def state_space_estimator(exogenous_feature_columns):
"""Constructs a StructuralEnsembleRegressor."""
def _exogenous_update_condition(times, features):
del times # unused
# Make exogenous updates sparse by setting an update condition. This in
# effect allows missing exogenous features: if the condition evaluates to
# False, no update is performed. Otherwise we sometimes end up with "leaky"
# updates which add unnecessary uncertainty to the model even when there is
# no changepoint.
return tf.equal(tf.squeeze(features["is_changepoint"], axis=-1), "yes")
return (
tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=exogenous_feature_columns,
exogenous_update_condition=_exogenous_update_condition),
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
4, 64)
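# Note on the gating above: the update condition receives the raw string feature,
# so a window whose "is_changepoint" values are ["yes", "no", "no", ...] yields
# the boolean gate [True, False, False, ...], and only the flagged timestep
# triggers an exogenous state update.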
def autoregressive_estimator(exogenous_feature_columns):
input_window_size = 8
output_window_size = 2
return (
tf.contrib.timeseries.ARRegressor(
periodicities=12,
num_features=1,
input_window_size=input_window_size,
output_window_size=output_window_size,
exogenous_feature_columns=exogenous_feature_columns),
64, input_window_size + output_window_size)
def train_and_evaluate_exogenous(
estimator_fn, csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.feature_column.categorical_column_with_vocabulary_list(
key="is_changepoint", vocabulary_list=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.feature_column.indicator_column(
categorical_column=string_feature)
estimator, batch_size, window_size = estimator_fn(
exogenous_feature_columns=[one_hot_feature])
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=batch_size, window_size=window_size)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly (state space)",
*train_and_evaluate_exogenous(
estimator_fn=state_space_estimator))
make_plot("Ignoring a known anomaly (autoregressive)",
*train_and_evaluate_exogenous(
estimator_fn=autoregressive_estimator, train_steps=3000))
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/api/custom_scale_example.py | 9 | 6401 | from __future__ import unicode_literals
import numpy as np
from numpy import ma
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
from matplotlib.ticker import Formatter, FixedLocator
class MercatorLatitudeScale(mscale.ScaleBase):
"""
Scales data in range -pi/2 to pi/2 (-90 to 90 degrees) using
the system used to scale latitudes in a Mercator projection.
The scale function:
ln(tan(y) + sec(y))
The inverse scale function:
atan(sinh(y))
    Since the Mercator scale tends to infinity at +/- 90 degrees,
    there is a user-defined threshold, above and below which nothing
    will be plotted. This defaults to +/- 85 degrees.
source:
http://en.wikipedia.org/wiki/Mercator_projection
"""
# The scale class must have a member ``name`` that defines the
# string used to select the scale. For example,
# ``gca().set_yscale("mercator")`` would be used to select this
# scale.
name = 'mercator'
def __init__(self, axis, **kwargs):
"""
Any keyword arguments passed to ``set_xscale`` and
``set_yscale`` will be passed along to the scale's
constructor.
thresh: The degree above which to crop the data.
"""
mscale.ScaleBase.__init__(self)
thresh = kwargs.pop("thresh", (85 / 180.0) * np.pi)
if thresh >= np.pi / 2.0:
raise ValueError("thresh must be less than pi/2")
self.thresh = thresh
def get_transform(self):
"""
Override this method to return a new instance that does the
actual transformation of the data.
The MercatorLatitudeTransform class is defined below as a
nested class of this one.
"""
return self.MercatorLatitudeTransform(self.thresh)
def set_default_locators_and_formatters(self, axis):
"""
Override to set up the locators and formatters to use with the
scale. This is only required if the scale requires custom
locators and formatters. Writing custom locators and
formatters is rather outside the scope of this example, but
there are many helpful examples in ``ticker.py``.
        In our case, the Mercator example uses a fixed locator from
        -90 to 90 degrees and a custom formatter class to convert the
        radians to degrees and append a degree symbol to the value.
"""
class DegreeFormatter(Formatter):
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
return "%d\u00b0" % ((x / np.pi) * 180.0)
deg2rad = np.pi / 180.0
axis.set_major_locator(FixedLocator(
np.arange(-90, 90, 10) * deg2rad))
axis.set_major_formatter(DegreeFormatter())
axis.set_minor_formatter(DegreeFormatter())
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Override to limit the bounds of the axis to the domain of the
transform. In the case of Mercator, the bounds should be
limited to the threshold that was passed in. Unlike the
autoscaling provided by the tick locators, this range limiting
will always be adhered to, whether the axis range is set
manually, determined automatically or changed through panning
and zooming.
"""
return max(vmin, -self.thresh), min(vmax, self.thresh)
class MercatorLatitudeTransform(mtransforms.Transform):
# There are two value members that must be defined.
# ``input_dims`` and ``output_dims`` specify number of input
# dimensions and output dimensions to the transformation.
# These are used by the transformation framework to do some
# error checking and prevent incompatible transformations from
# being connected together. When defining transforms for a
# scale, which are, by definition, separable and have only one
# dimension, these members should always be set to 1.
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform_non_affine(self, a):
"""
This transform takes an Nx1 ``numpy`` array and returns a
transformed copy. Since the range of the Mercator scale
is limited by the user-specified threshold, the input
array must be masked to contain only valid values.
``matplotlib`` will handle masked arrays and remove the
out-of-range data from the plot. Importantly, the
``transform`` method *must* return an array that is the
same shape as the input array, since these values need to
remain synchronized with values in the other dimension.
"""
masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a)
if masked.mask.any():
return ma.log(np.abs(ma.tan(masked) + 1.0 / ma.cos(masked)))
else:
return np.log(np.abs(np.tan(a) + 1.0 / np.cos(a)))
def inverted(self):
"""
Override this method so matplotlib knows how to get the
inverse transform for this transform.
"""
return MercatorLatitudeScale.InvertedMercatorLatitudeTransform(self.thresh)
class InvertedMercatorLatitudeTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform_non_affine(self, a):
return np.arctan(np.sinh(a))
def inverted(self):
return MercatorLatitudeScale.MercatorLatitudeTransform(self.thresh)
# Now that the Scale class has been defined, it must be registered so
# that ``matplotlib`` can find it.
mscale.register_scale(MercatorLatitudeScale)
if __name__ == '__main__':
import matplotlib.pyplot as plt
t = np.arange(-180.0, 180.0, 0.1)
s = t / 360.0 * np.pi
plt.plot(t, s, '-', lw=2)
plt.gca().set_yscale('mercator')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.title('Mercator: Projection of the Oppressor')
plt.grid(True)
plt.show()
| apache-2.0 |
SCP-028/UGA | protein_pka/mcce/mcce.py | 1 | 17127 | #!python3
"""
Predict protein pKa based on MCCE method.
http://pka.engr.ccny.cuny.edu/
Require MCCE 3.0 to work: https://anaconda.org/SalahSalah/mcce/files
"""
import asyncio
import glob
import gzip
import locale
import logging
import math
import os
import re
import shutil
import subprocess
import sys
import time
from multiprocessing import Pool
from urllib.request import urlopen
import aioftp
import pandas as pd
import uvloop
# Sapelo Locale is broken, quick fix
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
# Set working directory
ROOTPATH = os.path.dirname(os.path.realpath(sys.argv[0]))
os.chdir(ROOTPATH)
# Log settings
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(f"./pKa_calculation_{__file__}.log")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s\t%(levelname)s\t"
"[%(filename)s:%(lineno)s -%(funcName)12s()]\t%(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
class pdb:
def __init__(self):
self.all_ids = []
self.download_ids = [] # Download -> Unzip -> Preprocess -> Calculate
self.unzip_ids = [] # Unzip -> Preprocess -> Calculate
self.preprocess_ids = [] # Preprocess -> Calculate
self.ready_ids = [] # Calculate
self.finished_ids = [] # Successfully calculated IDs
self.error_ids = [] # Error in download, unzip, or calculation
# IDs this script will work on (messy queue implementation)
self.working_ids = []
def load_id(self):
"""
First try to get existing pKa values,
then get the list of PDB files to download.
"""
for folder in ["./pdb", "./annotation", "./results"]:
try:
os.makedirs(folder)
except OSError:
pass
self.finished_ids = [id[-8:-4] for id in glob.glob("./results/*.pka")]
logger.debug(f"{len(self.finished_ids)} finished files.")
# Create file even at first run so that the results folder doesn't get deleted
with open("./results/finished_ids.list", "a") as f:
f.write("\n".join(self.finished_ids))
self.ready_ids = list(set(
[id[-12:-8].upper() for id in glob.glob("./pdb/*/*.pdb.bak")]) - set(self.finished_ids))
logger.debug(f"{len(self.ready_ids)} files ready to be calculated.")
self.preprocess_ids = list(set([id[-8:-4].upper() for id in glob.glob(
"./pdb/*/*.pdb") if "out" not in id]) - set(self.finished_ids) - set(self.ready_ids))
logger.debug(
f"{len(self.preprocess_ids)} files ready to be preprocessed.")
self.unzip_ids = [id[-11:-7].upper() for id in glob.glob("./*.ent.gz")]
logger.debug(f"{len(self.unzip_ids)} files ready to be unzipped.")
if not os.path.exists("./annotation/uniprot_id_mapping.dat"):
with urlopen("ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/by_organism/HUMAN_9606_idmapping.dat.gz") as remotefile:
logger.debug(
"Saving UniProt ID mapping data since it doesn't exist...")
with open("./annotation/uniprot_id_mapping.dat.gz", "wb") as f:
f.write(remotefile.read())
with gzip.open(
"./annotation/uniprot_id_mapping.dat.gz", "rb") as inFile, open(
"./annotation/uniprot_id_mapping.dat", "wb") as outFile:
shutil.copyfileobj(inFile, outFile)
os.remove("./annotation/uniprot_id_mapping.dat.gz")
else:
logger.debug("UniProt ID mapping data exists.")
logger.debug("Reading all possible PDB IDs...")
annot = pd.read_csv("./annotation/uniprot_id_mapping.dat",
sep="\t", header=None,
names=["uniprot", "id", "value"])
self.all_ids = annot.loc[annot.id == "PDB", "value"].tolist()
self.download_ids = list(set(self.all_ids) - set(self.unzip_ids) - set(
self.preprocess_ids) - set(self.ready_ids) - set(self.finished_ids))
logger.info(
f"{len(self.download_ids)} PDB files need to be downloaded.")
def get_link(self, ids):
""" Get PDB file links from:
ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/ ,
and create folders to store the files.
Parameters
----------
ids: list
The PDB IDs to download.
Returns
-------
Links to download.
"""
if isinstance(ids, list):
ids = [id[:4].lower() for id in ids] # pdb file IDs
pdb_names = [f"{id}.ent.gz" for id in ids] # pdb filenames
# subdirectory of the pdb files
pdbDirs = [id[1:3].lower() for id in ids]
remoteaddr = [
f"ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/{pdbDir}/pdb{pdb_name}" for pdbDir, pdb_name in zip(pdbDirs, pdb_names)]
else:
raise TypeError(f"{id} is not a string or list.")
return remoteaddr
def make_dirs(self, ids):
"""Make sure the download directory exists."""
for id in ids:
try:
os.makedirs(os.path.join(ROOTPATH, "pdb", id.upper()))
except OSError:
pass
async def download_worker(self, session, url):
"""Download the given url to working directory."""
url = url[len("ftp://ftp.wwpdb.org"):]
logger.debug(f"Downloading {url}")
try:
await session.download(url)
self.unzip_ids.append(url[-11:-7].upper())
except Exception as e:
self.error_ids.append(url[-11:-7].upper())
logger.warning(f"Error when downloading {url}: {e}")
async def download_session(self, sem, work_queue):
""" Get urls from the queue and pass to worker.
Parameters
----------
sem: asyncio.Semaphore object
work_queue: asyncio.Queue object
"""
while not work_queue.empty():
url = await work_queue.get()
logger.debug(f"Got url from queue: {url}")
async with sem:
async with aioftp.ClientSession("ftp.wwpdb.org") as session:
await self.download_worker(session, url)
def download_queue(self, urls):
""" Create a queue to download all the given urls.
Parameters
----------
urls: list
A list of urls to download.
Returns
-------
Downloaded "*.ent.gz" files in working directory.
"""
logger.debug(f"{len(urls)} urls to download.")
loop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)
q = asyncio.Queue()
sem = asyncio.Semaphore(10)
[q.put_nowait(url) for url in urls]
tasks = [asyncio.ensure_future(self.download_session(sem, q))
for _ in range(len(urls))]
loop.run_until_complete(asyncio.gather(*tasks))
# Zero-sleep to allow underlying connections to close
loop.run_until_complete(asyncio.sleep(0))
loop.close()
def check_mcce(self):
"""Check if MCCE 3.0 exists."""
if not os.path.exists(os.path.join(ROOTPATH, "mcce3.0")):
if not os.path.exists(os.path.join(ROOTPATH, "mcce3.0.tar.bz2")):
logger.debug("MCCE isn't downloaded yet. Retrieving...")
with urlopen("https://anaconda.org/SalahSalah/mcce/3.0/download/linux-64/mcce-3.0-0.tar.bz2") as remotefile:
with open("./mcce-3.0-0.tar.bz2", 'wb') as f:
f.write(remotefile.read())
subprocess.run(["tar", "-xjf", "mcce-3.0-0.tar.bz2"])
shutil.move("./info/recipe/mcce3.0", "./mcce3.0")
shutil.rmtree(os.path.join(ROOTPATH, "info"), ignore_errors=True)
shutil.rmtree(os.path.join(ROOTPATH, "bin"), ignore_errors=True)
else:
logger.info("MCCE 3.0 exists, proceeding to calculation...")
def unzip(self, id):
"""Unzip downloaded *.ent.gz file."""
try:
saved_pdb = os.path.join(ROOTPATH, "pdb", id, f"{id}.pdb")
with gzip.open(f"pdb{id.lower()}.ent.gz", "rb") as inFile, open(saved_pdb, "wb") as outFile:
shutil.copyfileobj(inFile, outFile)
os.remove(f"pdb{id.lower()}.ent.gz")
self.preprocess_ids.append(id)
except Exception as e:
self.error_ids.append(id)
logger.warning(f"Unzip of {id} unsuccessful: {e}")
def preprocess(self, id, backup=True):
"""
This program will:
1) strip lines other than ATOM and HETATM records
2) keep the first model of an NMR structure
3) delete H and D atoms
4) MSE to MET residue
5) keep only one atom alternate position
6) keep defined chains, if chain ID(s) are given in command
7) remove some cofactors and salt ions
Parameters
----------
id: str
The PDB ID to find the file.
        backup: bool, optional
            Whether to back up the original file. Default is True, which
            saves a copy to "<ID>.pdb.bak".
Returns
-------
Nothing, modify the file in place.
"""
removable_res = [
" ZN", "PCA", "XYP", " NA", " CL", " CA", " MG", " MN", "HOH"
]
model_start = False
newlines = []
ID = id.upper()
filepath = os.path.join(ROOTPATH, "pdb", ID, f"{ID}.pdb")
if backup:
shutil.copy2(filepath, f"{filepath}.bak")
with open(filepath) as f:
for line in f:
if line[:5] == "MODEL":
model_start = True
if model_start and line[:6] == "ENDMDL":
break
if line[:6] != "ATOM " and line[:6] != "HETATM":
continue # discard non ATOM records
if line[13] == "H" or line[12] == "H":
continue
if line[16] == "A":
line = f"{line[:16]} {line[17:]}"
elif line[16] != " ":
                    continue  # delete this line, alternate position is not A or empty
if line[:6] == "HETATM" and line[17:20] == "MSE":
if line[12:15] == "SE ":
line = f"ATOM {line[6:12]} SD{line[15:17]}MET{line[20:]}"
else:
line = f"ATOM {line[6:17]}MET{line[20:]}"
res = line[17:20]
if res in removable_res:
continue
newlines.append(line.rstrip())
with open(filepath, "w") as f:
f.write("\n".join(newlines))
logger.debug(f"{ID} preprocessing complete.")
def set_params(self, id, quickrun=True):
"""
Set the parameters for MCCE.
Parameters
----------
id: str
The PDB ID of the file.
quickrun: bool, optional
Use "run.prm.quick" or "run.prm.default".
Returns
-------
run.prm: a file describing the parameters that points to the PDB file.
"""
pkgpath = os.path.join(ROOTPATH, "mcce3.0")
ID = id.upper()
filepath = os.path.join(ROOTPATH, "pdb", ID)
newlines = []
if quickrun:
shutil.copy2(
os.path.join(pkgpath, "run.prm.quick"),
os.path.join(filepath, "run.prm")
)
else:
            shutil.copy2(
                os.path.join(pkgpath, "run.prm.default"),
                os.path.join(filepath, "run.prm")
            )
with open(os.path.join(filepath, "run.prm")) as f:
for line in f:
line = line.rstrip()
if line.endswith("(INPDB)"):
line = re.sub(r"^[^\s]+", fr"{id}.pdb", line)
if line.endswith(("(DO_PREMCCE)", "(DO_ROTAMERS)",
"(DO_ENERGY)", "(DO_MONTE)")):
line = re.sub(r"^f", r"t", line)
if line.endswith("(EPSILON_PROT)"):
line = re.sub(r"^[\d\.]+", r"8.0", line)
if line.startswith("/home/mcce/mcce3.0"):
line = re.sub(r"^/.*3\.0", pkgpath,
line)
newlines.append(line)
with open(os.path.join(filepath, "run.prm"), "w") as f:
f.write("\n".join(newlines))
self.ready_ids.append(ID)
logger.debug(f"Parameters set for {ID}.")
def split_ready_ids(self, num):
""" A naive queue implementation for multiple scripts.
Parameters
----------
num: int
Which part of the IDs to work on.
Returns
-------
A list of the actual IDs to work on, and save the lists of IDs for
other scripts to work with if this is the first instance.
"""
if os.path.isfile(os.path.join(ROOTPATH, "results", "working_ids.list")):
with open(os.path.join(ROOTPATH, "results", f"working_ids.list{num}"), "r") as f:
self.working_ids = [line.strip() for line in f]
else:
n = math.ceil(len(self.ready_ids) / 10)
self.working_ids = [self.ready_ids[i:i + n]
for i in range(0, len(self.ready_ids), n)]
metafile = []
for i, ids in enumerate(self.working_ids):
metafile.append(os.path.join(
ROOTPATH, "results", f"working_ids.list{i}"))
with open(os.path.join(ROOTPATH, "results", f"working_ids.list{i}"), "w") as f:
f.write("\n".join(ids))
logger.debug(
f"Saved {len(ids)} IDs to file working_ids.list{i} .")
with open(os.path.join(ROOTPATH, "results", "working_ids.list"), "w") as f:
f.write("\n".join(metafile))
self.working_ids = self.working_ids[num]
def calc_pka(self, id, clean=True):
""" Calculate protein pKa values using MCCE.
https://sites.google.com/site/mccewiki/home
Parameters
----------
id: str
The PDB ID of the protein calculated.
clean: bool, optional
Only keep the PDB file, run log and pKa output.
Returns
-------
A set of files in a subdirectory named after the ID.
See user manual for detail.
"""
id = id.upper()
os.chdir(os.path.realpath(os.path.join(ROOTPATH, "pdb", id)))
logger.info(f"{id} calculation started.")
start = time.time()
with open(f"{id}.run.log", "w") as f:
subprocess.run(f"{ROOTPATH}/mcce3.0/mcce", stdout=f)
with open(f"{id}.run.log", "rb") as f:
last = f.readlines()[-1].decode().lstrip()
if last.startswith(("Fatal", "FATAL", "WARNING", "STOP")):
self.error_ids.append(id)
logger.warning(
f"{id} calculation aborted after {time.time() - start}s, due to {last}")
else:
self.finished_ids.append(id)
logger.info(
f"{id} calculation finished, used {time.time() - start}s.")
shutil.move("pK.out", os.path.join(
ROOTPATH, "results", f"{id}.pka"))
if clean:
del_list = [i for i in os.listdir() if i not in (
"pK.out", f"{id}.run.log", f"{id}.pdb.bak")]
[os.remove(item) for item in del_list]
if __name__ == "__main__":
x = pdb()
x.load_id()
urls = x.get_link(x.download_ids)
x.make_dirs(x.all_ids)
x.download_queue(urls)
x.check_mcce()
for id in x.unzip_ids:
x.unzip(id)
for id in x.preprocess_ids:
try:
x.preprocess(id)
x.set_params(id)
except Exception as e:
x.error_ids.append(id)
logger.warning(f"Preprocess of {id}: {e}")
# subprocess.run(["find", ".", "-type", "d", "-empty", "-delete"])
x.split_ready_ids(0) # 0 - 9, run 0 first to generate other lists
with Pool(os.cpu_count()) as p:
p.map(x.calc_pka, x.working_ids)
with open("./results/finished_ids.list", "a") as f:
f.write("\n".join(x.working_ids))
with open("./results/error_ids.list", "a") as f:
f.write("\n".join(x.error_ids))
| apache-2.0 |
smsolivier/VEF | code/hlimit.py | 1 | 2247 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import ld as LD
import dd as DD
from hidespines import *
import sys
''' compares difference between Sn and moment equations as cell width --> 0 '''
if (len(sys.argv) > 1):
outfile = sys.argv[1]
else:
outfile = None
def getDiff(sol, tol=1e-6):
diff = np.zeros(len(sol))
for i in range(len(sol)):
x, phi, it = sol[i].sourceIteration(tol)
# diff[i] = np.linalg.norm(phi - sol[i].phi_SN, 2)/np.linalg.norm(sol[i].phi_SN, 2)
diff[i] = np.linalg.norm(phi - sol[i].phi_SN, 2)/np.linalg.norm(sol[i].phi_SN, 2)
return diff
N = 100
n = 8
xb = 1
Sigmaa = lambda x: .1
Sigmat = lambda x: 1
q = lambda x, mu: 1
tol = 1e-10
N = np.logspace(1, 3, 5)
N = np.array([int(x) for x in N])
ed00 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=0) for x in N]
ed01 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=1) for x in N]
ed10 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=1, GAUSS=0) for x in N]
ed11 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=1, GAUSS=1) for x in N]
ed20 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=2, GAUSS=0) for x in N]
ed21 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=2, GAUSS=1) for x in N]
diff00 = getDiff(ed00, tol)
diff01 = getDiff(ed01, tol)
diff10 = getDiff(ed10, tol)
diff11 = getDiff(ed11, tol)
diff20 = getDiff(ed20, tol)
diff21 = getDiff(ed21, tol)
fontsize=16
plt.loglog(xb/N, diff00, '-o', clip_on=False, label='MHFEM Edges, No Gauss')
plt.loglog(xb/N, diff01, '-o', clip_on=False, label='Maintain Slopes, No Gauss')
plt.loglog(xb/N, diff10, '-o', clip_on=False, label='MHFEM Edges, Gauss')
plt.loglog(xb/N, diff11, '-o', clip_on=False, label='Maintain Slopes, Gauss')
plt.loglog(xb/N, diff20, '-o', clip_on=False, label='vanLeer, No Gauss')
plt.loglog(xb/N, diff21, '-o', clip_on=False, label='vanLeer, Gauss')
plt.xlabel(r'$h$', fontsize=fontsize)
plt.ylabel('SN/MHFEM Convergence', fontsize=fontsize)
plt.legend(loc='best', frameon=False)
hidespines(plt.gca())
if (outfile != None):
plt.savefig(outfile, transparent=True)
else:
plt.show()
| mit |
phobson/wqio | wqio/tests/test_datacollections.py | 2 | 28761 | from distutils.version import LooseVersion
from textwrap import dedent
from io import StringIO
import numpy
import scipy
from scipy import stats
import pandas
from unittest import mock
import pytest
import pandas.testing as pdtest
from wqio.tests import helpers
from wqio.features import Location, Dataset
from wqio.datacollections import DataCollection, _dist_compare
OLD_SCIPY = LooseVersion(scipy.version.version) < LooseVersion("0.19")
def check_stat(expected_csv, result, comp=False):
index_col = [0]
if comp:
index_col += [1]
file_obj = StringIO(dedent(expected_csv))
expected = pandas.read_csv(file_obj, header=[0, 1], index_col=index_col)
if comp:
expected = expected.stack(level=-1)
pdtest.assert_frame_equal(
expected.sort_index(axis="columns"),
result.sort_index(axis="columns").round(6),
atol=1e-5,
)
def remove_g_and_h(group):
return group.name[1] not in ["G", "H"]
@pytest.fixture
def dc():
df = helpers.make_dc_data_complex()
dc = DataCollection(
df,
rescol="res",
qualcol="qual",
stationcol="loc",
paramcol="param",
ndval="<",
othergroups=None,
pairgroups=["state", "bmp"],
useros=True,
filterfxn=remove_g_and_h,
bsiter=10000,
)
return dc
@pytest.fixture
def dc_noNDs():
df = helpers.make_dc_data_complex()
dc = DataCollection(
df,
rescol="res",
qualcol="qual",
stationcol="loc",
paramcol="param",
ndval="junk",
othergroups=None,
pairgroups=["state", "bmp"],
useros=True,
filterfxn=remove_g_and_h,
bsiter=10000,
)
return dc
def test_basic_attr(dc):
assert dc._raw_rescol == "res"
assert isinstance(dc.data, pandas.DataFrame)
assert dc.roscol == "ros_res"
assert dc.rescol == "ros_res"
assert dc.qualcol == "qual"
assert dc.stationcol == "loc"
assert dc.paramcol == "param"
assert dc.ndval == ["<"]
assert dc.bsiter == 10000
assert dc.groupcols == ["loc", "param"]
assert dc.tidy_columns == ["loc", "param", "res", "__censorship"]
assert hasattr(dc, "filterfxn")
def test_data(dc):
assert isinstance(dc.data, pandas.DataFrame)
assert dc.data.shape == (519, 8)
assert "G" in dc.data["param"].unique()
assert "H" in dc.data["param"].unique()
@pytest.mark.parametrize("useros", [True, False])
def test_tidy(dc, useros):
assert isinstance(dc.tidy, pandas.DataFrame)
assert dc.tidy.shape == (388, 5)
assert "G" not in dc.tidy["param"].unique()
assert "H" not in dc.tidy["param"].unique()
collist = ["loc", "param", "res", "__censorship", "ros_res"]
assert dc.tidy.columns.tolist() == collist
def test_paired(dc):
assert isinstance(dc.paired, pandas.DataFrame)
assert dc.paired.shape == (164, 6)
assert "G" not in dc.paired.index.get_level_values("param").unique()
assert "H" not in dc.paired.index.get_level_values("param").unique()
dc.paired.columns.tolist() == [
("res", "Inflow"),
("res", "Outflow"),
("res", "Reference"),
("__censorship", "Inflow"),
("__censorship", "Outflow"),
("__censorship", "Reference"),
]
def test_count(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,Count,Count,Count
param,,,
A,21,22,20
B,24,22,19
C,24,24,25
D,24,25,21
E,19,16,20
F,21,24,17
"""
check_stat(known_csv, dc.count)
def test_n_unique(dc):
known_csv = """\
loc,Inflow,Outflow,Reference
result,bmp,bmp,bmp
param,,,
A,7,7,7
B,7,7,7
C,7,7,7
D,7,7,7
E,7,7,7
F,7,7,7
G,7,7,7
H,7,7,7
"""
check_stat(known_csv, dc.n_unique("bmp"))
@helpers.seed
def test_median(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
result,lower,median,upper,lower,median,upper,lower,median,upper
param,,,,,,,,,
A,0.334506,1.197251,2.013994,0.860493,2.231058,2.626023,1.073386,1.639472,1.717293
B,1.366948,2.773989,3.297147,0.23201,1.546499,2.579206,0.204164,1.565076,2.196367
C,0.17351,0.525957,0.68024,0.247769,0.396984,0.540742,0.136462,0.412693,0.559458
D,0.374122,1.201892,2.098846,0.516989,1.362759,1.827087,0.314655,0.882695,1.24545
E,0.276095,1.070858,1.152887,0.287914,0.516746,1.456859,0.366824,0.80716,2.040739
F,0.05667,0.832488,1.310575,0.425237,1.510942,2.193997,0.162327,0.745993,1.992513
"""
check_stat(known_csv, dc.median)
@helpers.seed
def test_mean(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
result,lower,mean,upper,lower,mean,upper,lower,mean,upper
param,,,,,,,,,
A,1.231607,2.646682,4.204054,1.930601,5.249281,9.081952,1.540167,3.777974,6.389439
B,2.99031,7.647175,12.810844,1.545539,6.863835,12.705913,1.010374,4.504255,9.592572
C,0.37496,0.513248,0.65948,0.411501,1.004637,1.706317,0.35779,0.541962,0.734751
D,1.29141,3.021235,4.987855,1.285899,2.318808,3.451824,1.008364,1.945828,2.924812
E,0.818641,1.914696,3.049554,0.584826,1.098241,1.640807,1.113589,2.283292,3.581946
F,0.8379,9.825404,25.289933,1.497825,3.450184,5.61929,0.939917,2.491708,4.094258
"""
check_stat(known_csv, dc.mean)
@helpers.seed
def test_std_dev(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,std. dev.,std. dev.,std. dev.
param,,,
A,3.58649,8.719371,5.527633
B,12.360099,13.60243,10.759285
C,0.353755,1.691208,0.493325
D,4.811938,2.849393,2.248178
E,2.55038,1.096698,2.789238
F,34.447565,5.361033,3.398367
"""
check_stat(known_csv, dc.std_dev)
@helpers.seed
def test_percentile_25(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,pctl 25,pctl 25,pctl 25
param,,,
A,0.522601,0.906029,1.094721
B,1.472541,0.251126,0.314226
C,0.164015,0.267521,0.136462
D,0.35688,0.516989,0.383895
E,0.364748,0.311508,0.394658
F,0.120068,0.406132,0.224429
"""
check_stat(known_csv, dc.percentile(25))
@helpers.seed
def test_percentile_75(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,pctl 75,pctl 75,pctl 75
param,,,
A,2.563541,3.838021,2.650648
B,4.728871,2.849948,2.261847
C,0.776388,0.853535,0.792612
D,3.04268,2.79341,3.611793
E,1.532775,1.59183,3.201534
F,1.792985,2.80979,2.742249
"""
check_stat(known_csv, dc.percentile(75))
@helpers.seed
def test_logmean(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
result,Log-mean,lower,upper,Log-mean,lower,upper,Log-mean,lower,upper
param,,,,,,,,,
A,0.140559,-0.55112,0.644202,0.733004,0.047053,1.22099,0.545205,-0.057683,1.029948
B,1.026473,0.368659,1.541241,0.105106,-0.939789,0.860244,0.068638,-0.932357,0.661203
C,-0.963004,-1.304115,-0.638446,-0.83221,-1.464092,-0.414379,-1.088377,-1.556795,-0.720706
D,0.062317,-0.663241,0.58349,0.185757,-0.325074,0.598432,-0.063507,-0.670456,0.434214
E,-0.103655,-0.751075,0.385909,-0.456202,-1.08692,0.029967,-0.068135,-0.787007,0.51226
F,-0.442721,-1.874677,0.344704,0.211658,-0.504166,0.734283,-0.253352,-1.175917,0.467231
"""
check_stat(known_csv, dc.logmean)
@helpers.seed
def test_logstd_dev(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,Log-std. dev.,Log-std. dev.,Log-std. dev.
param,,,
A,1.374026,1.343662,1.225352
B,1.430381,2.07646,1.662001
C,0.818504,1.263631,1.057177
D,1.530871,1.187246,1.277927
E,1.264403,1.121038,1.474431
F,2.324063,1.516331,1.701596
"""
check_stat(known_csv, dc.logstd_dev)
@helpers.seed
def test_geomean(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
Geo-mean,Log-mean,lower,upper,Log-mean,lower,upper,Log-mean,lower,upper
param,,,,,,,,,
A,1.150917,0.576304,1.904467,2.081323,1.048178,3.390543,1.724962,0.943949,2.800919
B,2.791205,1.445795,4.670381,1.110829,0.39071,2.363737,1.071049,0.393625,1.937121
C,0.381744,0.271413,0.528113,0.435087,0.231288,0.66075,0.336763,0.210811,0.486409
D,1.064299,0.515179,1.792283,1.204129,0.722474,1.819264,0.938467,0.511475,1.543749
E,0.901536,0.471859,1.470951,0.633686,0.337254,1.03042,0.934134,0.455205,1.66906
F,0.642286,0.153405,1.411572,1.235726,0.604009,2.083988,0.776195,0.308536,1.595571
"""
check_stat(known_csv, dc.geomean)
@helpers.seed
def test_geostd_dev(dc):
known_csv = """\
station,Inflow,Outflow,Reference
Geo-std. dev.,Log-std. dev.,Log-std. dev.,Log-std. dev.
param,,,
A,3.951225,3.833055,3.405365
B,4.180294,7.976181,5.269843
C,2.267105,3.538244,2.878234
D,4.622199,3.278041,3.589191
E,3.540977,3.068036,4.368548
F,10.217099,4.55548,5.48269
"""
check_stat(known_csv, dc.geostd_dev)
@helpers.seed
def test_shapiro(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,pvalue,statistic,pvalue,statistic,pvalue,statistic
param,,,,,,
A,1.8e-05,0.685783,1e-06,0.576069,4e-06,0.61735
B,1e-06,0.594411,0.0,0.530962,0.0,0.41471
C,0.028774,0.905906,0.0,0.546626,0.00279,0.860373
D,1e-06,0.622915,1.5e-05,0.722374,0.000202,0.76518
E,1.7e-05,0.654137,0.004896,0.818813,0.000165,0.74917
F,0.0,0.292916,2e-06,0.634671,0.000167,0.713968
"""
check_stat(known_csv, dc.shapiro)
@helpers.seed
def test_shapiro_log(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,statistic,pvalue,statistic,pvalue,statistic,pvalue
param,,,,,,
A,0.983521938,0.96662426,0.979861856,0.913820148,0.939460814,0.234214202
B,0.957531095,0.390856266,0.97048676,0.722278714,0.967978418,0.735424638
C,0.906479359,0.029602444,0.974698305,0.78197974,0.967106879,0.572929323
D,0.989704251,0.995502174,0.990663111,0.997093379,0.964812279,0.617747009
E,0.955088913,0.479993254,0.95211035,0.523841977,0.963425279,0.61430341
F,0.97542423,0.847370088,0.982230783,0.933124721,0.966197193,0.749036908
"""
check_stat(known_csv, dc.shapiro_log)
@helpers.seed
def test_lilliefors(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,lilliefors,pvalue,lilliefors,pvalue,lilliefors,pvalue
param,,,,,,
A,0.308131,1.4e-05,0.340594,0.0,0.364453,0.0
B,0.36764,0.0,0.420343,0.0,0.417165,0.0
C,0.166799,0.082737,0.324733,0.0,0.161753,0.090455
D,0.273012,6.7e-05,0.240311,0.000665,0.296919,3.7e-05
E,0.341398,3e-06,0.239314,0.014862,0.233773,0.005474
F,0.419545,0.0,0.331315,0.0,0.284249,0.000741
"""
check_stat(known_csv, dc.lilliefors)
@helpers.seed
def test_lilliefors_log(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,log-lilliefors,pvalue,log-lilliefors,pvalue,log-lilliefors,pvalue
param,,,,,,
A,0.08548109,0.95458004,0.15443943,0.19715747,0.20141389,0.03268737
B,0.16162839,0.10505016,0.12447902,0.49697902,0.15934334,0.22969362
C,0.16957278,0.07248915,0.12388174,0.44379732,0.11746642,0.48915671
D,0.06885549,0.99,0.06067356,0.99,0.13401954,0.41967483
E,0.13506577,0.47186822,0.14552341,0.47797919,0.09164876,0.92860794
F,0.14420794,0.30694533,0.08463267,0.92741885,0.08586933,0.9800294
"""
check_stat(known_csv, dc.lilliefors_log)
@helpers.seed
def test_anderson_darling(dc):
with helpers.raises(NotImplementedError):
_ = dc.anderson_darling
@helpers.seed
def test_anderson_darling_log(dc):
with helpers.raises(NotImplementedError):
_ = dc.anderson_darling_log
@helpers.seed
def test_mann_whitney(dc):
known_csv = """\
,,mann_whitney,mann_whitney,mann_whitney,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,180.0,179.0,,0.2198330905,0.4263216587
A,Outflow,282.0,,248.0,0.2198330905,,0.488580368
A,Reference,241.0,192.0,,0.4263216587,0.488580368,
B,Inflow,,345.0,317.0,,0.0766949991,0.0304383994
B,Outflow,183.0,,216.0,0.0766949991,,0.8650586835
B,Reference,139.0,202.0,,0.0304383994,0.8650586835,
C,Inflow,,282.0,323.0,,0.9097070273,0.6527104406
C,Outflow,294.0,,323.0,0.9097070273,,0.6527104406
C,Reference,277.0,277.0,,0.6527104406,0.6527104406,
D,Inflow,,285.0,263.0,,0.7718162376,0.8111960975
D,Outflow,315.0,,293.0,0.7718162376,,0.5082395211
D,Reference,241.0,232.0,,0.8111960975,0.5082395211,
E,Inflow,,164.0,188.0,,0.7033493939,0.9663820218
E,Outflow,140.0,,132.0,0.7033493939,,0.3813114322
E,Reference,192.0,188.0,,0.9663820218,0.3813114322,
F,Inflow,,201.0,172.0,,0.2505911218,0.8601783903
F,Outflow,303.0,,236.0,0.2505911218,,0.4045186043
F,Reference,185.0,172.0,,0.8601783903,0.4045186043
"""
check_stat(known_csv, dc.mann_whitney, comp=True)
@helpers.seed
def test_t_test(dc):
known_csv = """\
,,pvalue,pvalue,pvalue,t_test,t_test,t_test
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,0.2178424157,0.4563196599,,-1.2604458127,-0.7539785777
A,Outflow,0.2178424157,,0.5240147979,1.2604458127,,0.643450194
A,Reference,0.4563196599,0.5240147979,,0.7539785777,-0.643450194,
B,Inflow,,0.8430007638,0.3898358794,,0.1992705833,0.869235357
B,Outflow,0.8430007638,,0.5491097882,-0.1992705833,,0.6043850808
B,Reference,0.3898358794,0.5491097882,,-0.869235357,-0.6043850808,
C,Inflow,,0.1847386316,0.8191392537,,-1.3639360123,-0.2300373632
C,Outflow,0.1847386316,,0.2179907667,1.3639360123,,1.2615982727
C,Reference,0.8191392537,0.2179907667,,0.2300373632,-1.2615982727,
D,Inflow,,0.5484265023,0.344783812,,0.6056706932,0.9582600001
D,Outflow,0.5484265023,,0.6299742693,-0.6056706932,,0.4851636024
D,Reference,0.344783812,0.6299742693,,-0.9582600001,-0.4851636024,
E,Inflow,,0.2304569921,0.6770414622,,1.2287029977,-0.4198288251
E,Outflow,0.2304569921,,0.1023435465,-1.2287029977,,-1.6935358498
E,Reference,0.6770414622,0.1023435465,,0.4198288251,1.6935358498,
F,Inflow,,0.422008391,0.3549979666,,0.8190789273,0.9463539528
F,Outflow,0.422008391,,0.4988994144,-0.8190789273,,0.6826435968
F,Reference,0.3549979666,0.4988994144,,-0.9463539528,-0.6826435968
"""
check_stat(known_csv, dc.t_test, comp=True)
@helpers.seed
def test_levene(dc):
known_csv = """\
,,levene,levene,levene,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,1.176282059,0.293152155,,0.284450688,0.591287419
A,Outflow,1.176282059,,0.397705309,0.284450688,,0.531863542
A,Reference,0.293152155,0.397705309,,0.591287419,0.531863542,
B,Inflow,,0.003559637,0.402002411,,0.952694449,0.529578712
B,Outflow,0.003559637,,0.408938588,0.952694449,,0.526247443
B,Reference,0.402002411,0.408938588,,0.529578712,0.526247443,
C,Inflow,,1.965613561,0.679535532,,0.167626459,0.413910674
C,Outflow,1.965613561,,1.462364363,0.167626459,,0.232602352
C,Reference,0.679535532,1.462364363,,0.413910674,0.232602352,
D,Inflow,,0.643364813,0.983777911,,0.426532092,0.32681669
D,Outflow,0.643364813,,0.116830634,0.426532092,,0.734124856
D,Reference,0.983777911,0.116830634,,0.32681669,0.734124856,
E,Inflow,,0.961616536,0.410491665,,0.333914902,0.525668596
E,Outflow,0.961616536,,2.726351564,0.333914902,,0.107912818
E,Reference,0.410491665,2.726351564,,0.525668596,0.107912818,
F,Inflow,,0.841984453,0.734809611,,0.363948105,0.396999375
F,Outflow,0.841984453,,0.25881357,0.363948105,,0.613802541
F,Reference,0.734809611,0.25881357,,0.396999375,0.613802541,
"""
check_stat(known_csv, dc.levene, comp=True)
@helpers.seed
def test_wilcoxon(dc):
known_csv = """\
,,wilcoxon,wilcoxon,wilcoxon,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,32.0,59.0,,0.03479,0.430679
A,Outflow,32.0,,46.0,0.03479,,0.274445
A,Reference,59.0,46.0,,0.430679,0.274445,
B,Inflow,,38.0,22.0,,0.600179,0.182338
B,Outflow,38.0,,31.0,0.600179,,0.858863
B,Reference,22.0,31.0,,0.182338,0.858863,
C,Inflow,,75.0,120.0,,0.167807,0.601046
C,Outflow,75.0,,113.0,0.167807,,0.463381
C,Reference,120.0,113.0,,0.601046,0.463381,
D,Inflow,,44.0,31.0,,0.593618,0.530285
D,Outflow,44.0,,45.0,0.593618,,0.972125
D,Reference,31.0,45.0,,0.530285,0.972125,
E,Inflow,,21.0,19.0,,0.910156,0.386271
E,Outflow,21.0,,16.0,0.910156,,0.077148
E,Reference,19.0,16.0,,0.386271,0.077148,
F,Inflow,,62.0,22.0,,0.492459,0.952765
F,Outflow,62.0,,28.0,0.492459,,0.656642
F,Reference,22.0,28.0,,0.952765,0.656642,
"""
with pytest.warns(UserWarning):
check_stat(known_csv, dc.wilcoxon, comp=True)
@helpers.seed
def test_ranksums(dc):
known_csv = """\
,,pvalue,pvalue,pvalue,rank_sums,rank_sums,rank_sums
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,0.2153009,0.4187782,,-1.2391203,-0.8085428
A,Outflow,0.2153009,,0.4807102,1.2391203,,0.7051607
A,Reference,0.4187782,0.4807102,,0.8085428,-0.7051607,
B,Inflow,,0.0748817,0.029513,,1.781188,2.1765661
B,Outflow,0.0748817,,0.8547898,-1.781188,,0.1830104
B,Reference,0.029513,0.8547898,,-2.1765661,-0.1830104,
C,Inflow,,0.9015386,0.6455162,,-0.1237179,0.46
C,Outflow,0.9015386,,0.6455162,0.1237179,,0.46
C,Reference,0.6455162,0.6455162,,-0.46,-0.46,
D,Inflow,,0.7641772,0.8023873,,-0.3,0.2502587
D,Outflow,0.7641772,,0.5011969,0.3,,0.6726078
D,Reference,0.8023873,0.5011969,,-0.2502587,-0.6726078,
E,Inflow,,0.6911022,0.9551863,,0.3973597,-0.0561951
E,Outflow,0.6911022,,0.3727144,-0.3973597,,-0.8914004
E,Reference,0.9551863,0.3727144,,0.0561951,0.8914004,
F,Inflow,,0.2459307,0.8486619,,-1.1602902,-0.190826
F,Outflow,0.2459307,,0.3971011,1.1602902,,0.8468098
F,Reference,0.8486619,0.3971011,,0.190826,-0.8468098,
"""
check_stat(known_csv, dc.ranksums, comp=True)
@helpers.seed
@pytest.mark.xfail(OLD_SCIPY, reason="Scipy < 0.19")
def test_kendall(dc):
known_csv = """\
,,kendalltau,kendalltau,kendalltau,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,-0.051661,-0.00738,,0.772893,0.967114
A,Outflow,-0.051661,,-0.083333,0.772893,,0.690095
A,Reference,-0.00738,-0.083333,,0.967114,0.690095,
B,Inflow,,0.441351,0.298246,,0.015267,0.119265
B,Outflow,0.441351,,0.559855,0.015267,,0.004202
B,Reference,0.298246,0.559855,,0.119265,0.004202,
C,Inflow,,0.280223,0.084006,,0.078682,0.578003
C,Outflow,0.280223,,-0.1417,0.078682,,0.352394
C,Reference,0.084006,-0.1417,,0.578003,0.352394,
D,Inflow,,0.403469,0.095299,,0.020143,0.634826
D,Outflow,0.403469,,0.318337,0.020143,,0.094723
D,Reference,0.095299,0.318337,,0.634826,0.094723,
E,Inflow,,0.114286,0.640703,,0.673337,0.004476
E,Outflow,0.114286,,0.167944,0.673337,,0.449603
E,Reference,0.640703,0.167944,,0.004476,0.449603,
F,Inflow,,0.0,0.07231,,1.0,0.763851
F,Outflow,0.0,,0.388889,1.0,,0.063
F,Reference,0.07231,0.388889,,0.763851,0.063,
"""
check_stat(known_csv, dc.kendall, comp=True)
@helpers.seed
def test_spearman(dc):
known_csv = """\
,,pvalue,pvalue,pvalue,spearmanrho,spearmanrho,spearmanrho
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,0.7574884491,0.9627447553,,-0.0809319588,0.012262418
A,Outflow,0.7574884491,,0.7617330788,-0.0809319588,,-0.0823529412
A,Reference,0.9627447553,0.7617330788,,0.012262418,-0.0823529412,
B,Inflow,,0.0110829791,0.0775159774,,0.5831305575,0.4537313433
B,Outflow,0.0110829791,,0.0024069317,0.5831305575,,0.6850916941
B,Reference,0.0775159774,0.0024069317,,0.4537313433,0.6850916941,
C,Inflow,,0.1330504059,0.6063501968,,0.3387640122,0.1134228342
C,Outflow,0.1330504059,,0.3431640379,0.3387640122,,-0.2070506455
C,Reference,0.6063501968,0.3431640379,,0.1134228342,-0.2070506455,
D,Inflow,,0.0195715066,0.4751861062,,0.4935814032,0.1858231711
D,Outflow,0.0195715066,,0.1263974782,0.4935814032,,0.363209462
D,Reference,0.4751861062,0.1263974782,,0.1858231711,0.363209462,
E,Inflow,,0.9828818202,0.0013596162,,0.0084033613,0.8112988341
E,Outflow,0.9828818202,,0.3413722947,0.0084033613,,0.3012263814
E,Reference,0.0013596162,0.3413722947,,0.8112988341,0.3012263814,
F,Inflow,,0.9645303744,0.6759971848,,-0.0106277141,0.1348767061
F,Outflow,0.9645303744,,0.0560590794,-0.0106277141,,0.5028571429
F,Reference,0.6759971848,0.0560590794,,0.1348767061,0.5028571429
"""
check_stat(known_csv, dc.spearman, comp=True)
@helpers.seed
def test_theilslopes(dc):
with helpers.raises(NotImplementedError):
_ = dc.theilslopes
def test_inventory(dc):
known_csv = StringIO(
dedent(
"""\
loc,param,Count,Non-Detect
Inflow,A,21,3
Inflow,B,24,6
Inflow,C,24,0
Inflow,D,24,11
Inflow,E,19,4
Inflow,F,21,8
Outflow,A,22,1
Outflow,B,22,9
Outflow,C,24,4
Outflow,D,25,12
Outflow,E,16,2
Outflow,F,24,8
Reference,A,20,2
Reference,B,19,6
Reference,C,25,4
Reference,D,21,12
Reference,E,20,3
Reference,F,17,7
"""
)
)
expected = pandas.read_csv(known_csv, index_col=[0, 1]).astype(int)
pdtest.assert_frame_equal(expected, dc.inventory.astype(int), check_names=False)
def test_inventory_noNDs(dc_noNDs):
known_csv = StringIO(
dedent(
"""\
loc,param,Count,Non-Detect
Inflow,A,21,0
Inflow,B,24,0
Inflow,C,24,0
Inflow,D,24,0
Inflow,E,19,0
Inflow,F,21,0
Outflow,A,22,0
Outflow,B,22,0
Outflow,C,24,0
Outflow,D,25,0
Outflow,E,16,0
Outflow,F,24,0
Reference,A,20,0
Reference,B,19,0
Reference,C,25,0
Reference,D,21,0
Reference,E,20,0
Reference,F,17,0
"""
)
)
expected = pandas.read_csv(known_csv, index_col=[0, 1]).astype(int)
pdtest.assert_frame_equal(
expected, dc_noNDs.inventory.astype(int), check_names=False,
)
@helpers.seed
def test_stat_summary(dc):
known_csv = StringIO(
dedent(
"""\
ros_res,loc,A,B,C,D,E,F
Count,Inflow,21,24,24,24,19,21
Count,Outflow,22,22,24,25,16,24
Count,Reference,20,19,25,21,20,17
Non-Detect,Inflow,3.0,6.0,0.0,11.0,4.0,8.0
Non-Detect,Outflow,1.0,9.0,4.0,12.0,2.0,8.0
Non-Detect,Reference,2.0,6.0,4.0,12.0,3.0,7.0
mean,Inflow,2.64668,7.64717,0.51325,3.02124,1.9147,9.8254
mean,Outflow,5.24928,6.86384,1.00464,2.31881,1.09824,3.45018
mean,Reference,3.77797,4.50425,0.54196,1.94583,2.28329,2.49171
std,Inflow,3.67506,12.62594,0.36136,4.91543,2.62027,35.29825
std,Outflow,8.92456,13.92253,1.72758,2.90815,1.13267,5.47634
std,Reference,5.67123,11.05411,0.5035,2.3037,2.8617,3.50296
min,Inflow,0.0756,0.17404,0.10213,0.05365,0.08312,0.00803
min,Outflow,0.11177,0.02106,0.03578,0.11678,0.07425,0.06377
min,Reference,0.15575,0.04909,0.04046,0.08437,0.05237,0.03445
10%,Inflow,0.1772,0.45233,0.13467,0.15495,0.1763,0.03548
10%,Outflow,0.44852,0.08297,0.08222,0.26949,0.19903,0.18008
10%,Reference,0.38448,0.13467,0.08241,0.19355,0.12777,0.09457
25%,Inflow,0.5226,1.47254,0.16401,0.35688,0.36475,0.12007
25%,Outflow,0.90603,0.25113,0.26752,0.51699,0.31151,0.40613
25%,Reference,1.09472,0.31423,0.13646,0.3839,0.39466,0.22443
50%,Inflow,1.19725,2.77399,0.52596,1.20189,1.07086,0.83249
50%,Outflow,2.23106,1.5465,0.39698,1.36276,0.51675,1.51094
50%,Reference,1.63947,1.56508,0.41269,0.8827,0.80716,0.74599
75%,Inflow,2.56354,4.72887,0.77639,3.04268,1.53278,1.79299
75%,Outflow,3.83802,2.84995,0.85354,2.79341,1.59183,2.80979
75%,Reference,2.65065,2.26185,0.79261,3.61179,3.20153,2.74225
90%,Inflow,6.02835,24.40655,0.99293,8.00691,6.28345,8.51706
90%,Outflow,12.43052,23.90022,2.43829,5.66731,2.30348,10.32829
90%,Reference,12.58278,6.67125,1.2205,4.78255,7.72012,8.57303
max,Inflow,13.87664,45.97893,1.26657,21.75505,8.88365,163.01001
max,Outflow,36.58941,47.49381,8.04948,12.39894,4.19118,23.29367
max,Reference,21.22363,48.23615,1.94442,7.67751,8.75609,10.5095
"""
)
)
expected = pandas.read_csv(known_csv, index_col=[0, 1]).T
pdtest.assert_frame_equal(
expected.round(5),
dc.stat_summary().round(5),
check_names=False,
check_dtype=False,
rtol=1e-4,
)
def test_locations(dc):
for loc in dc.locations:
assert isinstance(loc, Location)
assert len(dc.locations) == 18
assert dc.locations[0].definition == {"loc": "Inflow", "param": "A"}
assert dc.locations[1].definition == {"loc": "Inflow", "param": "B"}
def test_datasets(dc):
_ds = []
for d in dc.datasets("Inflow", "Outflow"):
assert isinstance(d, Dataset)
_ds.append(d)
assert len(_ds) == 6
assert _ds[0].definition == {"param": "A"}
assert _ds[1].definition == {"param": "B"}
# this sufficiently tests dc._filter_collection
def test_selectLocations(dc):
locs = dc.selectLocations(param="A", loc=["Inflow", "Outflow"])
assert len(locs) == 2
for n, (loc, loctype) in enumerate(zip(locs, ["Inflow", "Outflow"])):
assert isinstance(loc, Location)
assert loc.definition["param"] == "A"
assert loc.definition["loc"] == loctype
def test_selectLocations_squeeze_False(dc):
locs = dc.selectLocations(param="A", loc=["Inflow"], squeeze=False)
assert len(locs) == 1
for n, loc in enumerate(locs):
assert isinstance(loc, Location)
assert loc.definition["param"] == "A"
assert loc.definition["loc"] == "Inflow"
def test_selectLocations_squeeze_True(dc):
loc = dc.selectLocations(param="A", loc=["Inflow"], squeeze=True)
assert isinstance(loc, Location)
assert loc.definition["param"] == "A"
assert loc.definition["loc"] == "Inflow"
def test_selectLocations_squeeze_True_None(dc):
loc = dc.selectLocations(param="A", loc=["Junk"], squeeze=True)
assert loc is None
# since the test_selectLocations* tests stress _filter_collection
# enough, we'll mock it out for datasets:
def test_selectDatasets(dc):
with mock.patch.object(dc, "_filter_collection") as _fc:
with mock.patch.object(dc, "datasets", return_value=["A", "B"]) as _ds:
dc.selectDatasets("Inflow", "Reference", foo="A", bar="C")
_ds.assert_called_once_with("Inflow", "Reference")
_fc.assert_called_once_with(["A", "B"], foo="A", bar="C", squeeze=False)
@pytest.mark.parametrize("func", [stats.mannwhitneyu, stats.wilcoxon])
@pytest.mark.parametrize(
("x", "all_same"), [([5, 5, 5, 5, 5], True), ([5, 6, 7, 7, 8], False)]
)
def test_dist_compare_wrapper(x, all_same, func):
y = [5, 5, 5, 5, 5]
with mock.patch.object(stats, func.__name__) as _test:
result = _dist_compare(x, y, _test)
if all_same:
assert numpy.isnan(result.stat)
assert numpy.isnan(result.pvalue)
assert _test.call_count == 0
else:
# assert result == (0, 0)
_test.assert_called_once_with(x, y, alternative="two-sided")
| bsd-3-clause |
legacysurvey/rapala | ninetyprime/linearitycheck.py | 2 | 17953 | #!/usr/bin/env python
import os
import glob
import numpy as np
import fitsio
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.backends.backend_pdf import PdfPages
from astropy.table import Table
from bokpipe import *
from bokpipe.bokoscan import _convertfitsreg
def init_data_map(datadir,outdir,expTimes=None,files=None):
dataMap = {}
if not os.path.exists(outdir):
os.mkdir(outdir)
dataMap['outdir'] = outdir
if files is None:
dataMap['files'] = sorted(glob.glob(datadir+'*.fits') +
glob.glob(datadir+'*.fits.gz') +
glob.glob(datadir+'*.fits.fz'))
else:
dataMap['files'] = files
dataMap['rawFiles'] = dataMap['files']
dataMap['oscan'] = bokio.FileNameMap(outdir)
dataMap['proc'] = bokio.FileNameMap(outdir,'_p')
dataMap['files'] = [ dataMap['oscan'](f) for f in dataMap['files'] ]
if expTimes is None:
dataMap['expTime'] = np.array([fitsio.read_header(f)['EXPTIME']
for f in dataMap['files']])
else:
dataMap['expTime'] = expTimes
try:
# assume they are all the same
dataMap['dataSec'] = \
_convertfitsreg(fitsio.read_header(
dataMap['files'][0],'IM4')['DATASEC'])
except IOError:
pass
return dataMap
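# The dataMap dict built above is the shared state for this script: 'rawFiles' and
# 'files' hold the raw and overscan-subtracted frame paths, 'oscan' and 'proc' are
# FileNameMaps into outdir, 'expTime' is the per-frame exposure-time array, and
# 'dataSec' (when the first header is readable) is the DATASEC region of IM4.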
def process_data(dataMap,redo=True,withvar=True,oscanims=False,bias2d=False):
oscanSubtract = BokOverscanSubtract(output_map=dataMap['oscan'],
overwrite=redo,
write_overscan_image=oscanims,
oscan_cols_file=dataMap['outdir']+'oscan_cols',
oscan_rows_file=dataMap['outdir']+'oscan_rows',
verbose=10)#method='median_value')
oscanSubtract.process_files(dataMap['rawFiles'])
if bias2d:
biasname = 'bias'
biasStack = bokproc.BokBiasStack(#reject=None,
overwrite=redo,
with_variance=withvar)
bias2dFile = os.path.join(dataMap['outdir'],biasname+'.fits')
biasStack.stack(dataMap['biasFiles'],bias2dFile)
#imProcess = bokproc.BokCCDProcess(bias2dFile,
# output_map=dataMap['proc'])
#imProcess.process_files(flatFrames)
def imstat(dataMap,outfn='stats'):
from astropy.stats import sigma_clip
from scipy.stats import mode,scoreatpercentile
array_stats = bokutil.array_stats
fnlen = len(os.path.basename(dataMap['files'][0]))
st = np.zeros(len(dataMap['flatSequence']),
dtype=[('file','S%d'%fnlen),
('expTime','f4'),
('median','16f4'),
('mean','16f4'),
('mode','16f4'),
('iqr25','16f4'),
('iqr75','16f4'),
('iqr10','16f4'),
('iqr90','16f4')])
for _i,i in enumerate(dataMap['flatSequence']):
expTime = dataMap['expTime'][i]
fn = os.path.basename(dataMap['files'][i])
fits = fitsio.FITS(dataMap['files'][i])
print '%s %4.1f ' % (fn,expTime),
st['file'][_i] = fn
st['expTime'][_i] = expTime
for j,extn in enumerate(['IM%d' % n for n in range(1,17)]):
modeVal,pix = array_stats(fits[extn].read()[dataMap['statsPix']],
method='mode',retArray=True)
st['mode'][_i,j] = modeVal
st['mean'][_i,j] = pix.mean()
st['median'][_i,j] = np.ma.median(pix)
st['iqr25'][_i,j] = scoreatpercentile(pix,25)
st['iqr75'][_i,j] = scoreatpercentile(pix,75)
st['iqr10'][_i,j] = scoreatpercentile(pix,10)
st['iqr90'][_i,j] = scoreatpercentile(pix,90)
print '%5d ' % (modeVal),
print
fitsio.write(outfn+'.fits',st,clobber=True)
def scaled_histograms(dataMap,nims=None,outfn='pixhist'):
pdf = PdfPages(outfn+'.pdf')
for _i,i in enumerate(dataMap['flatSequence']):
if nims is not None and _i==nims:
break
expTime = dataMap['expTime'][i]
expScale = dataMap['refExpTime'] / expTime
print dataMap['files'][i]
fn = os.path.basename(dataMap['files'][i])
fits = fitsio.FITS(dataMap['files'][i])
fig = plt.figure(figsize=(8.0,10))
plt.subplots_adjust(0.08,0.08,0.92,0.92,0.3,0.35)
for j,extn in enumerate(['IM%d' % n for n in range(1,17)]):
ax = plt.subplot(8,2,j+1)
pix = fits[extn].read()[dataMap['statsPix']]
ax.hist(expScale*pix.flatten(),100,(0,40000),edgecolor='none')
ax.text(0.05,0.9,extn,va='top',size=9,transform=ax.transAxes)
ax.set_xlim(0,40000)
ax.xaxis.set_major_locator(ticker.MultipleLocator(10000))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(2000))
ax.yaxis.set_major_locator(ticker.MultipleLocator(50000))
plt.figtext(0.5,0.99,fn+' exp=%.1f' % expTime,ha='center',va='top')
pdf.savefig(fig)
plt.close(fig)
pdf.close()
def plot_sequence(dataMap,st,imNum,which='median'):
expScale = dataMap['refExpTime']/st['expTime']
seqno = 1 + np.arange(len(st))
ref = np.isclose(expScale,1.0)
j = imNum - 1
plt.figure(figsize=(8,6))
plt.subplots_adjust(0.11,0.08,0.96,0.95)
plt.errorbar(seqno[ref],expScale[ref]*st[which][ref,j],
[expScale[ref]*(st[which]-st['iqr10'])[ref,j],
expScale[ref]*(st['iqr90']-st[which])[ref,j]],
fmt='bs-')
plt.errorbar(seqno[~ref],expScale[~ref]*st[which][~ref,j],
[expScale[~ref]*(st[which]-st['iqr10'])[~ref,j],
expScale[~ref]*(st['iqr90']-st[which])[~ref,j]],
fmt='cs-')
#plt.scatter(seqno,expScale*st['mode'][:,j],marker='+',c='r')
#plt.scatter(seqno,expScale*st['mean'][:,j],marker='x',c='g')
plt.xlabel('sequence number')
plt.ylabel('counts scaled by exp time')
plt.title('IM%d'%imNum)
plt.xlim(0.5,len(st)+0.5)
def fit_ref_exposures(dataMap,st,imNum,
which='median',method='spline',doplot=False):
from scipy.interpolate import UnivariateSpline
seqno = 1 + np.arange(len(st))
t = st['expTime']
ref = np.isclose(t,dataMap['refExpTime'])
j = imNum - 1
refCounts = st[which][ref,j][0]
if method=='linear':
_fit = np.polyfit(seqno[ref],refCounts/st[which][ref,j],1)
fit = lambda x: np.polyval(_fit,x)
elif method=='spline':
fit = UnivariateSpline(seqno[ref],refCounts/st[which][ref,j],
s=1e-5,k=3)
else:
raise ValueError
if doplot:
plt.figure()
plt.subplot(211)
plt.plot(seqno[ref],st[which][ref,j],'bs-')
plt.plot(seqno,refCounts/fit(seqno),c='r')
plt.subplot(212)
plt.plot(seqno[ref],(st[which][ref,j]-refCounts/fit(seqno[ref]))
/st[which][ref,j],'bs-')
plt.axhline(0,c='r')
return fit
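# The callable returned by fit_ref_exposures maps sequence number to
# refCounts/counts for the repeated reference exposures, i.e. the inverse of the
# lamp drift relative to the first reference frame; multiplying a frame's counts
# by fit(seqno) (the fscl factor used below) removes that slow drift before the
# linearity fit.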
def plot_linearity_curves(dataMap,st,which='median',correct=True,isPTC=False,
refCor=None,fitmethod='spline',outfn='linearity',
onlyim=None):
seqno = 1 + np.arange(len(st))
t = st['expTime']
print seqno,t
refExpTime = dataMap['refExpTime']
ref = np.isclose(t,refExpTime)
refCorFit = None
ii = np.arange(len(st))
# only use the increasing sequence, not the reference exposures
ii = ii[~ref]
if isPTC:
# for PTCs skip every other image since they are done in pairs
ii = ii[::2]
# only fit to unsaturated frames
try:
firstsat = np.where(np.any(st[which][ii,:] > 55000,axis=1))[0][0]
except IndexError:
firstsat = -1
if onlyim is None:
pdf = PdfPages(outfn+'.pdf')
for imNum in range(1,17):
if onlyim is not None and imNum != onlyim:
continue
j = imNum - 1
# correct lamp variation
if correct:
if refCor is None:
fscl_fit = fit_ref_exposures(dataMap,st,imNum,which,
method=fitmethod)
else:
if refCorFit is None:
refCorFit = fit_ref_exposures(dataMap,st,imNum,which)
fscl_fit = refCorFit
fscl = fscl_fit(seqno)
else:
fscl = np.ones_like(seqno)
fit = np.polyfit(t[ii[:firstsat]],
fscl[ii[:firstsat]]*st[which][ii[:firstsat],j],1)
fitv = np.polyval(fit,t)
slope = fit[0] / (st[which][ref,j][0]/refExpTime)
#
pltindex = imNum % 4
if onlyim is None:
if pltindex == 1:
fig = plt.figure(figsize=(8,10))
plt.subplots_adjust(0.11,0.08,0.96,0.95,0.25,0.2)
ax = plt.subplot(4,2,2*(j%4)+1)
else:
fig = plt.figure(figsize=(6,2.5))
plt.subplots_adjust(0.11,0.23,0.99,0.98,0.35,0.2)
ax = plt.subplot(1,2,1)
plt.plot(t[ii],fscl[ii]*st[which][ii,j],'bs-')
plt.xlim(0.9*t.min(),t.max()+0.5)
plt.xscale('log')
plt.ylim(1e2,9e4)
plt.yscale('log')
plt.ylabel('counts [%s]' % which)
tt = np.logspace(-1,np.log10(1.3*t.max()),100)
plt.plot(tt,np.polyval(fit,tt),c='r')
plt.text(0.05,0.9,'IM%d'%imNum,va='top',transform=ax.transAxes)
plt.text(0.95,0.18,r'y = %.1f $\times$ t + %.1f' % tuple(fit),
ha='right',va='top',size=9,transform=ax.transAxes)
plt.text(0.95,0.10,r'y = %.3f $\times$ counts + %.1f' % (slope,fit[1]),
ha='right',va='top',size=9,transform=ax.transAxes)
if pltindex==0 or onlyim is not None:
plt.xlabel('exptime (s)')
#
if onlyim is None:
ax = plt.subplot(4,2,2*(j%4)+2)
else:
ax = plt.subplot(1,2,2)
plt.plot(t[ii],100*(fscl[ii]*st[which][ii,j]-fitv[ii])/fitv[ii],'bs-')
plt.axhline(0,c='r')
#ax.xaxis.set_major_locator(ticker.MultipleLocator(10))
#ax.xaxis.set_minor_locator(ticker.MultipleLocator(2))
ax.yaxis.set_major_locator(ticker.MultipleLocator(2))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.5))
plt.ylim(-5,5)
plt.xlim(0.9*t.min(),t.max()+0.5)
plt.xscale('log')
if pltindex==0 or onlyim is not None:
plt.xlabel('exptime (s)')
plt.ylabel('residual \%')
if onlyim is None:
if pltindex == 0:
pdf.savefig(fig)
plt.close(fig)
if onlyim is None:
pdf.close()
def rel_gain(dataMap,st,which='median',correct=True,fitmethod='spline',
nskip=0):
seqno = 1 + np.arange(len(st))
t = st['expTime']
refExpTime = dataMap['refExpTime']
ref = np.isclose(t,refExpTime)
refCorFit = None
ii = np.arange(len(st))
ii = ii[~ref]
ii = ii[nskip:]
sky4 = st[which][ii,3]
fit_ii = ii[np.where((sky4>5000)&(sky4<25000))[0]]
plt.figure()
for imNum in range(1,17):
j = imNum - 1
# correct lamp variation
if correct:
if True: #refCor is None:
fscl_fit = fit_ref_exposures(dataMap,st,imNum,which,
method=fitmethod)
else:
if refCorFit is None:
refCorFit = fit_ref_exposures(dataMap,st,imNum,which)
fscl_fit = refCorFit
fscl = fscl_fit(seqno)
else:
fscl = np.ones_like(seqno)
fit = np.polyfit(t[fit_ii],fscl[fit_ii]*st[which][fit_ii,j],1)
fitv = np.polyval(fit,t)
# slope = fit[0] / (st[which][ref,j][0]/refExpTime)
xx = np.array(0,1.1*t.max())
plt.subplot(4,4,imNum)
if False:
plt.scatter(t[ii],fscl[ii]*st[which][ii,j])
plt.plot(xx,np.polyval(fit,xx),c='r')
else:
plt.scatter(t[ii],fscl[ii]*st[which][ii,j]/fitv[ii])
plt.axhline(1,c='r')
plt.ylim(0.7,1.3)
if True:
plt.xscale('log')
plt.xlim(0.9*t.min(),1.1*t.max())
def get_first_saturated_frame(seq):
try:
firstsat = np.where(seq > 55000)[0][0]
except IndexError:
firstsat = -1
return firstsat
def compare_oscan_levels(dataMap,st):
files = [ dataMap['files'][i] for i in dataMap['flatSequence'] ]
oscans = np.zeros((len(files),16))
for j in range(16):
oscans[:,j] = [ fitsio.read_header(f,'IM%d'%(j+1))['OSCANMED']
for f in files ]
seqno = 1 + np.arange(len(st))
plt.figure()
for j in range(8,16):
ax = plt.subplot(8,2,2*(j%8)+1)
i1 = get_first_saturated_frame(st['median'][:,j])
plt.scatter(st['median'][:i1,j],oscans[:i1,j],c='b')
plt.ylabel('IM%d'%(j+1))
ax = plt.subplot(8,2,2*(j%8)+2)
plt.scatter(seqno[:i1],oscans[:i1,j],c='b')
def init_sep09bss_data_map():
datadir = os.environ.get('BASSDATA')+'/20150909/bss/20150908/'
exptimes = np.loadtxt(datadir+'../bss.20150909.log',usecols=(3,))
exptimes = exptimes[50:]
print exptimes
rdxdir = os.environ.get('GSCRATCH','tmp_sep')+'/bss_sep09/'
if not os.path.exists(rdxdir):
os.makedirs(rdxdir)
dataMap = init_data_map(datadir,rdxdir,
expTimes=exptimes,files=None)
dataMap['rawFiles'] = dataMap['rawFiles'][50:]
dataMap['files'] = dataMap['files'][50:]
dataMap['biasFiles'] = dataMap['files'][-5:]
#dataMap['flatSequence'] = range(50,68)
dataMap['flatSequence'] = range(18)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 40.0
return dataMap
def init_sep29ptc_data_map():
dataMap = init_data_map(
"/home/ian/dev/rapala/bokpipe/scratch/sep29ptcs/ptc/",'sep29ptcs/')
dataMap['biasFiles'] = [dataMap['files'][0],]
dataMap['flatSequence'] = range(1,len(dataMap['files']))
dataMap['statsPix'] = np.s_[20:-20,100:-100]
dataMap['refExpTime'] = 10.0
return dataMap
def init_oct02ptc_data_map():
dataMap = init_data_map(os.environ.get('GSCRATCH')+'/02oct15/ptc/',
os.environ.get('GSCRATCH')+'/02oct15/ptc_proc/')
dataMap['biasFiles'] = [dataMap['files'][0],]
dataMap['flatSequence'] = range(1,len(dataMap['files']))
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 10.0
return dataMap
def init_oct20_data_map():
datadir = os.environ.get('BASSDATA')+'/20151020/'
exptimes = np.loadtxt(datadir+'images.log',usecols=(6,))
nuse = 53
exptimes = exptimes[:nuse]
print exptimes
dataMap = init_data_map(datadir,'tmp_oct20',expTimes=exptimes)
dataMap['rawFiles'] = dataMap['rawFiles'][:nuse]
dataMap['files'] = [ dataMap['oscan'](f)
for f in dataMap['files'][:nuse] ]
dataMap['biasFiles'] = dataMap['files'][:20]
dataMap['flatSequence'] = range(20,nuse)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 3.0
return dataMap
def init_nov11g_data_map():
datadir = os.environ.get('BASSDATA')+'/Nov2015/'
log = Table.read(datadir+'bassLog_Nov2015.fits')
exptimes = log['expTime'][111:150]
files = [ datadir+f['utDir']+'/'+f['fileName']+'.fits'
for f in log[111:150] ]
dataMap = init_data_map(datadir,'tmp_nov11g',
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 3.0
return dataMap
def init_nov14_data_map(filt):
datadir = os.environ.get('BASSDATA')+'/Nov2015/'
log = Table.read(datadir+'bassLog_Nov2015.fits')
if filt=='g':
frames = np.r_[np.s_[297:345],np.s_[247:257]]
else:
frames = np.r_[np.s_[345:393],np.s_[247:257]]
exptimes = log['expTime'][frames]
files = [ datadir+f['utDir']+'/'+f['fileName']+'.fits'
for f in log[frames] ]
dataMap = init_data_map(datadir,'tmp_nov14'+filt,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = {'Ha':10.0,'g':3.0}[filt]
return dataMap
def init_jan3_data_map(filt):
datadir = os.environ.get('BASSDATA')
log = Table.read('basslogs/log_ut20160103.fits')
if filt=='g':
frames = np.r_[np.s_[57:105],np.s_[160:170]]
else:
frames = np.r_[np.s_[105:160],np.s_[160:170]]
exptimes = log['expTime'][frames]
files = [ datadir+'/'+f['utDir'].strip()+'/'+f['fileName'].strip()+'.fits'
for f in log[frames] ]
dataMap = init_data_map(datadir,'tmp_jan3'+filt,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = {'Ha':10.0,'g':3.0}[filt]
return dataMap
def init_data_map_fromfile(filename,outdir='tmp',nersc=True):
datadir = os.environ.get('BASSDATA')
if nersc:
datadir = os.path.join(datadir,'BOK_Raw')
log = np.loadtxt(filename,dtype=[('frameNum','i4'),('utDir','S8'),
('fileName','S35'),
('imType','S10'),('filter','S8'),
('expTime','f4')],skiprows=1)
exptimes = log['expTime']
files = [ datadir+'/'+f['utDir'].strip()+'/'+f['fileName'].strip()+'.fits'
for f in log ]
if nersc:
files = [ f+'.fz' for f in files ]
dataMap = init_data_map(datadir,outdir,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = np.array(dataMap['files'])[log['imType']=='zero']
dataMap['flatSequence'] = np.where(log['imType']=='flat')[0]
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
# assume it starts with reference
dataMap['refExpTime'] = exptimes[dataMap['flatSequence'][0]]
return dataMap
if __name__=='__main__':
import sys
dataset = sys.argv[1]
if dataset == 'sep09bss':
dataMap = init_sep09bss_data_map()
elif dataset == 'oct02':
dataMap = init_oct02ptc_data_map()
elif dataset == 'oct20':
dataMap = init_oct20_data_map()
elif dataset == 'nov11g':
dataMap = init_nov11g_data_map()
elif dataset == 'nov14g':
dataMap = init_nov14_data_map('g')
elif dataset == 'nov14Ha':
dataMap = init_nov14_data_map('Ha')
elif dataset == 'jan3g':
dataMap = init_jan3_data_map('g')
elif dataset == 'jan3Ha':
dataMap = init_jan3_data_map('Ha')
else:
dataMap = init_data_map_fromfile(sys.argv[2],dataset)
print 'processing ',dataset
if not os.path.exists('stats_'+dataset+'.fits'):
process_data(dataMap,bias2d=True)
imstat(dataMap,outfn='stats_'+dataset)
st = fitsio.read('stats_'+dataset+'.fits')
plot_linearity_curves(dataMap,st,outfn='linearity_'+dataset)
if True:
plot_linearity_curves(dataMap,st,outfn='linearity_'+dataset,
onlyim=4)
plt.savefig('linearity_IM4_%s.png'%dataset)
plot_sequence(dataMap,st,4)
plt.savefig('linsequence_IM4_%s.png'%dataset)
| bsd-3-clause |
Jailander/COSMOS | kriging_exploration/scripts/explorator.py | 1 | 34183 | #!/usr/bin/env python
import cv2
import sys
import yaml
import signal
import numpy as np
#import utm
import matplotlib as mpl
import matplotlib.cm as cm
import rospy
import argparse
import actionlib
from cosmos_msgs.msg import KrigInfo
from cosmos_msgs.srv import CompareModels
import kriging_exploration.map_coords
import std_msgs.msg
import open_nav.msg
from kriging_exploration.data_grid import DataGrid
from kriging_exploration.map_coords import MapCoords
from kriging_exploration.visualiser import KrigingVisualiser
from kriging_exploration.canvas import ViewerCanvas
from kriging_exploration.topological_map import TopoMap
from kriging_exploration.exploration import ExplorationPlan
from sensor_msgs.msg import NavSatFix
def overlay_image_alpha(img, img_overlay):
"""Overlay img_overlay on top of img at the position specified by
pos and blend using alpha_mask.
"""
show_image = img.copy()
alpha = img_overlay[:, :, 3] / 255.0 # Alpha mask must contain values
# within the range [0, 1]
# and be the same size as img_overlay.
# Image ranges
y1, y2 = 0, img.shape[0]
x1, x2 = 0, img.shape[1]
channels = img.shape[2]
alpha_inv = 1.0 - alpha
for c in range(channels):
show_image[y1:y2, x1:x2, c] = (alpha * img_overlay[y1:y2, x1:x2, c] + alpha_inv * img[y1:y2, x1:x2, c])
return show_image
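# Minimal usage sketch (hypothetical arrays, not from the original script): both
# inputs must cover the same area and the overlay needs an alpha channel, e.g.
#   base = np.zeros((480, 640, 3), np.uint8)     # BGR background
#   layer = np.zeros((480, 640, 4), np.uint8)    # BGRA overlay, [:, :, 3] = alpha
#   blended = overlay_image_alpha(base, layer)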
class Explorator(KrigingVisualiser):
#_w_shape=[(0, 16), (1, 17), (3, 17), (5, 16), (8, 15), (10, 15), (12, 14), (14, 13), (12, 12), (10, 11), (8, 11), (5, 10), (8, 9), (10, 9), (12, 8), (14, 7), (12, 6), (10, 5), (8, 5), (6, 4), (4, 3), (3, 2), (4, 1), (5, 0), (7, 0)]
#_w_shape=[(17, 0), (17, 1), (17, 3), (16, 5), (15, 8), (15, 10), (14, 12), (13, 14), (12, 12), (11, 10), (11, 8), (10, 5), (9, 8), (9, 10), (8, 12), (7, 14), (6, 12), (5, 10), (5, 8), (4, 6), (3, 4), (2, 3), (1, 4), (0, 5), (0, 7)]
#_w_shape=[(17, 0), (17,1), (17, 2), (17, 4), (16, 4), (16, 6), (16, 8), (15, 8), (15, 10), (14, 10), (14, 12), (13, 12), (13, 14), (12, 14), (12, 12), (11, 12), (11, 10), (10, 10), (10, 8), (10, 6), (10, 4), (9, 4), (9, 6), (9, 8), (9, 10), (8, 10), (8, 12), (7, 12), (7, 14), (6, 14), (6, 12), (5, 12), (5, 10), (4, 10), (4, 8), (4, 6), (4, 4), (3, 4), (3, 3), (2, 3), (2, 4), (1,4), (1, 6), (0,6), (1, 8), (0,8), (1, 10), (0, 10), (0, 12), (0, 14)]
_w_shape=[(17, 0), (16, 1), (14, 6), (12, 11), (10, 14), (8, 9), (5, 14), (3, 11), (2, 6), (0, 3)]
def __init__(self, lat_deg, lon_deg, zoom, size, args):
self.targets = []
self.results =[]
self.result_counter=0
self.explodist=0
self.running = True
self.last_coord=None
signal.signal(signal.SIGINT, self.signal_handler)
self.expid=args.experiment_name
print "Creating visualiser object"
super(Explorator, self).__init__(lat_deg, lon_deg, zoom, size)
cv2.namedWindow('explorator')
cv2.setMouseCallback('explorator', self.click_callback)
self.current_model=-1
self.draw_mode = 'none'
self.grid = DataGrid(args.limits_file, args.cell_size)
self.topo_map= TopoMap(self.grid)
self.visited_wp=[]
explo_type = args.area_coverage_type
self.define_exploration_type(explo_type)
self.navigating = False
self.pause_exp = False
self.exploring = 0
self.n_inputs = 0
print "NUMBER OF TARGETS:"
print len(self.explo_plan.targets)
self.limits_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.grid_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.exploration_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.gps_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.limits_canvas.draw_polygon(self.grid.limits, (0,0,255,128), thickness=1)
self.grid_canvas.draw_grid(self.grid.cells, args.cell_size, (128,128,128,2), thickness=1)
self.redraw()
self.redraw_kriged=True
self.redraw_var=True
self.redraw_devi=True
self.model_canvas=[]
self.model_legend=[]
self.kriging_canvas=[]
self.klegend_canvas=[]
self.klegend2_canvas=[]
self.klegend3_canvas=[]
self.sigma_canvas=[]
self.sigma2_canvas=[]
self.model_canvas_names=[]
self.mean_out_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_out_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_var_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_var_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_dev_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_dev_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
rospy.loginfo("Subscribing to Krig Info")
rospy.Subscriber("/kriging_data", KrigInfo, self.data_callback)
rospy.Subscriber("/fix", NavSatFix, self.gps_callback)
rospy.Subscriber('/penetrometer_scan', std_msgs.msg.String, self.scan_callback)
self.req_data_pub = rospy.Publisher('/request_scan', std_msgs.msg.String, latch=False, queue_size=1)
rospy.loginfo(" ... Connecting to Open_nav")
self.open_nav_client = actionlib.SimpleActionClient('/open_nav', open_nav.msg.OpenNavAction)
self.open_nav_client.wait_for_server()
rospy.loginfo(" ... done")
tim1 = rospy.Timer(rospy.Duration(0.2), self.drawing_timer_callback)
tim2 = rospy.Timer(rospy.Duration(0.1), self.control_timer_callback)
self.refresh()
while(self.running):
cv2.imshow('explorator', self.show_image)
k = cv2.waitKey(20) & 0xFF
self._change_mode(k)
tim1.shutdown()
tim2.shutdown()
cv2.destroyAllWindows()
sys.exit(0)
# EXPLORATION PARAMS HERE!!!!
def define_exploration_type(self, explo_type):
self.exploration_strategy=explo_type
self.n_goals=10
if explo_type=='area_split':
self.grid._split_area(3,3)
sb=[]
for i in self.grid.area_splits_coords:
(y, x) = self.grid.get_cell_inds_from_coords(i)
sb.append((x,y))
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, ac_model=explo_type, ac_coords=sb)
elif explo_type=='random':
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent)
elif explo_type=='w_shape':
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, ac_model=explo_type, ac_coords=self._w_shape)
else: #greedy
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, exploration_type='greedy', ac_model=explo_type)
def drawing_timer_callback(self, event):
self.refresh()
def control_timer_callback(self, event):
if self.navigating:
if self.open_nav_client.simple_state ==2:
print "DONE NAVIGATING"
self.navigating = False
if self.exploring==1:
self.exploring=2
elif self.exploring==2:
if not self.pause_exp:
self.explo_plan.explored_wp.append(self.explo_plan.route.pop(0))
info_str='Do_reading'
self.req_data_pub.publish(info_str)
self.exploring=3
elif self.exploring==4:
if not self.pause_exp:
if len(self.explo_plan.route) >0:
gg=self.explo_plan.route[0]
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=gg.coord.lat
targ.goal.coords.longitude=gg.coord.lon
print "Going TO: ", gg
self.exploring=1
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
else:
print "Done Exploring"
self.exploring = 0
# else:
# if self.exploring:
# print "waiting for new goal"
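    # self.exploring works as a small state machine: 0 = idle, 1 = navigating to
    # the current target, 2 = arrived and about to request a probe reading,
    # 3 = waiting for the kriging data callback, 4 = ready to send the next goal
    # (or stop once the planned route is empty).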
def gps_callback(self, data):
if not np.isnan(data.latitude):
self.gps_canvas.clear_image()
gps_coord = MapCoords(data.latitude,data.longitude)
self.gps_canvas.draw_coordinate(gps_coord,'black',size=2, thickness=2, alpha=255)
if self.last_coord:
dist = gps_coord - self.last_coord
self.explodist+= dist[0]
self.last_coord=gps_coord
def data_callback(self, msg):
point_coord = kriging_exploration.map_coords.coord_from_satnav_fix(msg.coordinates)
for i in msg.data:
self.grid.add_data_point(i.model_name, point_coord, i.measurement)
self.vmin, self.vmax = self.grid.get_max_min_vals()
self.n_models=len(self.grid.models)
for i in self.grid.models:
if i.name not in self.model_canvas_names:
print i.name
self.model_canvas_names.append(i.name)
self.model_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.model_legend.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.kriging_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend2_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend3_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.sigma_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.sigma2_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.draw_inputs(self.model_canvas_names.index(i.name))
self.n_inputs+=1
if self.exploring==3:
if self.n_inputs>3:
self.krieg_all_mmodels()
rospy.sleep(0.1)
self.grid.calculate_mean_grid()
rospy.sleep(0.1)
self.draw_means()
self.draw_mode="means"
resp = self.get_errors()
self.result_counter+=1
d={}
d['step']=self.result_counter
d['id']=self.expid
d['ns']=len(self.explo_plan.targets)
d['coord']={}
d['coord']['lat']=self.last_coord.lat
d['coord']['lon']=self.last_coord.lon
d['dist']=float(self.explodist)
d['results']={}
d['results']['groundtruth']=resp
d['results']['var']={}
d['results']['var']['mean']={}
d['results']['var']['mean']['mean']= float(np.mean(self.grid.mean_variance))
d['results']['var']['mean']['max']= float(np.max(self.grid.mean_variance))
d['results']['var']['mean']['min']= float(np.min(self.grid.mean_variance))
# d['results']['var']['std']['mean']= np.mean(self.grid.mean_deviation)
# d['results']['var']['std']['max']= np.max(self.grid.mean_deviation)
# d['results']['var']['std']['min']= np.min(self.grid.mean_deviation)
means=[]
maxs=[]
mins=[]
for i in range(self.n_models):
means.append(float(np.mean(self.grid.models[i].variance)))
maxs.append(float(np.max(self.grid.models[i].variance)))
mins.append(float(np.min(self.grid.models[i].variance)))
d['results']['models']={}
d['results']['models']['means']=means
d['results']['models']['maxs']=maxs
d['results']['models']['mins']=mins
rospy.sleep(0.1)
self.results.append(d)
if self.exploration_strategy == 'greedy':
nwp = len(self.explo_plan.route) + len(self.explo_plan.explored_wp)
print nwp, " nodes in plan"
if nwp <= self.n_goals:
#THIS IS the ONE
#self.explo_plan.add_limited_greedy_goal(self.grid.mean_variance, self.last_coord)
self.explo_plan.add_greedy_goal(self.grid.mean_variance)
#self.explo_plan.add_montecarlo_goal(self.grid.mean_variance, self.last_coord)
#self.draw_mode="deviation"
# self.current_model=0
# if self.redraw_devi:
# self.draw_all_devs()
self.redraw()
rospy.sleep(0.1)
self.exploring=4
def scan_callback(self, msg):
if msg.data == 'Reading':
print "GOT READING!!!"
cx, cy = self.grid.get_cell_inds_from_coords(self.last_coord)
if cx <0 or cy<0:
print "Reading outside the grid"
else:
print 'Reading at: ', cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
print 'Setting: ', i.name, i.coord, "as Visited"
i.visited= True
self.visited_wp.append(i)
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
def refresh(self):
#self.show_image = self.image.copy()
#self.show_image = cv2.addWeighted(self.gps_canvas.image, 0.7, self.image, 1.0, 0)
#self.show_image = transparentOverlay(self.image, self.gps_canvas.image)
self.show_image = overlay_image_alpha(self.image,self.gps_canvas.image)
def redraw(self):
self.image = cv2.addWeighted(self.grid_canvas.image, 0.5, self.base_image, 1.0, 0)
self.image = cv2.addWeighted(self.limits_canvas.image, 0.75, self.image, 1.0, 0)
self.image = cv2.addWeighted(self.exploration_canvas.image, 0.75, self.image, 1.0, 0)
if self.draw_mode == "inputs" and self.current_model>=0 :
self.image = cv2.addWeighted(self.model_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.model_legend[self.current_model].image)
if self.draw_mode == "kriging":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.kriging_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend_canvas[self.current_model].image)
if self.draw_mode == "deviation":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.sigma_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend3_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend3_canvas[self.current_model].image)
if self.draw_mode == "variance":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.sigma2_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend2_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend2_canvas[self.current_model].image)
if self.draw_mode == "means":
self.image = cv2.addWeighted(self.mean_dev_canvas.image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend2_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.mean_dev_legend_canvas.image)
self.show_image = self.image.copy()
def click_callback(self, event, x, y, flags, param):
if event == cv2.EVENT_RBUTTONDOWN:
click_coord = self.satellite._pix2coord(x,y)
cx, cy = self.grid.get_cell_inds_from_coords(click_coord)
if cx <0 or cy<0:
print "click outside the grid"
else:
print cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
print i.name, i.coord.easting, i.coord.northing
i.visited= True
self.visited_wp.append(i)
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
if event == cv2.EVENT_LBUTTONDOWN:
click_coord = self.satellite._pix2coord(x,y)
cx, cy = self.grid.get_cell_inds_from_coords(click_coord)
if cx <0 or cy<0:
print "click outside the grid"
else:
print cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
#goal.goal.goal.header.
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=i.coord.lat
targ.goal.coords.longitude=i.coord.lon
print targ
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
#self.client.wait_for_result()
# Prints out the result of executing the action
#ps = self.client.get_result()
#print ps
def draw_inputs(self, nm):
minv = self.grid.models[nm].lims[0]
maxv = self.grid.models[nm].lims[1]
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.model_canvas[nm].clear_image()
self.model_legend[nm].clear_image()
for i in self.grid.models[nm].orig_data:
cell = self.grid.cells[i.y][i.x]
a= colmap.to_rgba(int(i.value))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.model_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.model_canvas[nm].put_text(self.grid.models[nm].name)
self.model_legend[nm].put_text(self.grid.models[nm].name)
self.model_legend[nm].draw_legend(minv, maxv, colmap, title="Kriging")
def draw_krigged(self, nm):
print "drawing kriging" + str(nm)
minv = self.grid.models[nm].min_val
maxv = self.grid.models[nm].max_val
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.kriging_canvas[nm].clear_image()
self.klegend_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].output[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.kriging_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend_canvas[nm].draw_legend(minv, maxv, colmap, title="Kriging")
self.redraw()
def draw_variance(self, nm):
print "drawing variance" + str(nm)
minv = self.grid.models[nm].min_var
maxv = self.grid.models[nm].max_var
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax= maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.sigma_canvas[nm].clear_image()
self.klegend2_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].variance[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.sigma2_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend2_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend2_canvas[nm].draw_legend(minv, maxv, colmap, title="Variance")
self.redraw()
def draw_means(self):
print "drawing mean deviation ..."
minv = self.grid.min_mean_deviation
maxv = self.grid.max_mean_deviation
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.mean_dev_canvas.clear_image()
self.mean_dev_legend_canvas.clear_image()
for i in range(self.grid.shape[0]):
for j in range(self.grid.shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.mean_deviation[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.mean_dev_canvas.draw_cell(cell, self.grid.cell_size, b, thickness=-1)
#self.mean_dev_legend_canvas.put_text(self.grid.models[nm].name)
self.mean_dev_legend_canvas.draw_legend(minv, maxv, colmap, title="Mean Deviation")
#self.draw_mode="means"
self.redraw()
def draw_deviation(self, nm):
print "drawing deviation" + str(nm)
minv = self.grid.models[nm].min_dev
maxv = self.grid.models[nm].max_dev
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.sigma_canvas[nm].clear_image()
self.klegend3_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].deviation[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.sigma_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend3_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend3_canvas[nm].draw_legend(minv, maxv, colmap, title="Deviation")
self.redraw()
def krieg_all_mmodels(self):
for i in self.grid.models:
i.do_krigging()
self.redraw_kriged=True
self.redraw_var=True
self.redraw_devi=True
def draw_all_outputs(self):
for i in self.grid.models:
self.draw_krigged(self.model_canvas_names.index(i.name))
self.redraw_kriged=False
def draw_all_vars(self):
for i in self.grid.models:
self.draw_variance(self.model_canvas_names.index(i.name))
self.redraw_var=False
def draw_all_devs(self):
for i in self.grid.models:
self.draw_deviation(self.model_canvas_names.index(i.name))
self.redraw_devi=False
def _change_mode(self, k):
if k == 27:
self.running = False
elif k == ord('q'):
self.running = False
elif k == ord('n'):
print len(self.grid.models)
elif k == ord('i'):
if self.n_models > 0:
self.draw_mode="inputs"
self.current_model=0
self.redraw()
elif k == ord('d'):
if self.n_models > 0:
self.draw_mode="deviation"
self.current_model=0
if self.redraw_devi:
self.draw_all_devs()
self.redraw()
elif k == ord('v'):
if self.n_models > 0:
self.draw_mode="variance"
self.current_model=0
if self.redraw_var:
self.draw_all_vars()
self.redraw()
elif k == ord('t'):
self.krieg_all_mmodels()
self.grid.calculate_mean_grid()
if self.n_models > 0:
self.draw_all_outputs()
self.draw_mode="kriging"
self.current_model=0
self.redraw()
elif k == ord('k'):
if self.n_models > 0:
self.draw_mode="kriging"
self.current_model=0
if self.redraw_kriged:
self.draw_all_outputs()
self.redraw()
elif k == ord('>'):
self.current_model+=1
if self.current_model >= self.n_models:
self.current_model=0
self.redraw()
elif k == ord('<'):
self.current_model-=1
if self.current_model < 0:
self.current_model=self.n_models-1
self.redraw()
elif k == ord('w'):
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
elif k == ord('e'):
self.exploration_canvas.draw_waypoints(self.explo_plan.targets, (255,200,128,255), thickness=3)
self.exploration_canvas.draw_plan(self.explo_plan.route, 'cyan', thickness=1)
self.redraw()
#xnames = [x.name for x in self.explo_plan.route]
#print xnames
elif k == ord('g'):
if len(self.explo_plan.route) >0:
gg=self.explo_plan.route[0]
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=gg.coord.lat
targ.goal.coords.longitude=gg.coord.lon
print "Going TO: ", gg
self.exploring=1
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
self.result_counter=0
self.explodist=0
else:
print "Done Exploring"
self.exploring = 0
elif k == ord('y'):
vwp = []
for i in self.visited_wp:
vwp.append(i.name)
yml = yaml.safe_dump(vwp, default_flow_style=False)
fh = open("visited.yaml", "w")
s_output = str(yml)
fh.write(s_output)
            fh.close()
elif k == ord('l'):
print "loading visited"
with open("visited.yaml", 'r') as f:
visited = yaml.load(f)
for i in visited:
for l in self.topo_map.waypoints:
if i == l.name:
self.visited_wp.append(l)
break
elif k == ord('a'):
self.grid.calculate_mean_grid()
self.draw_means()
self.draw_mode="means"
elif k == ord('p'):
self.pause_exp= not self.pause_exp
elif k == ord('c'):
print self.grid.limits
print "Area: ", self.grid.calculate_area(self.grid.limits)
print "Area of Area: ", self.grid.area.area_size
colours=['magenta','cyan', 'grey','white','red','yellow','green','blue']
nc=0
for j in self.grid.area_splits:
print j.area_size
#self.limits_canvas.draw_coordinate(j.centre, 'crimson', size=3, thickness=2)
for i in j.limit_lines:
#self.limits_canvas.draw_line(i, colours[nc], thickness=1)
self.limits_canvas.draw_line(i, 'white', thickness=1)
if nc < len(colours)-1:
nc+=1
else:
nc=0
self.redraw()
elif k== ord('r'):
#diff = (self.grid.models[1].output - self.grid.models[0].output)
#print np.mean(diff), np.std(diff), diff.dtype
print self.get_errors()
elif k== ord('o'):
print self.results
outfile = self.expid + '.yaml'
#print self.data_out
yml = yaml.safe_dump(self.results, default_flow_style=False)
fh = open(outfile, "w")
s_output = str(yml)
#print s_output
fh.write(s_output)
            fh.close()
def get_errors(self):
error_chain=[]
shapeo = self.grid.models[0].output.shape
#print vals
print "Waiting for Service"
rospy.wait_for_service('/compare_model')
compare_serv = rospy.ServiceProxy('/compare_model', CompareModels)
for i in range(self.n_models):
try:
d={}
print "going for it ", i
vals = np.reshape(self.grid.models[i].output, -1)
resp1 = compare_serv('kriging', i, shapeo[0], shapeo[1], vals.tolist())
d['name']= self.grid.models[i].name
d['type']= 'kriging'
d['errors']={}
d['errors']['error']=resp1.error
d['errors']['mse']=resp1.mse
d['errors']['std']=resp1.std
d['errors']['var']=resp1.var
#print resp1
error_chain.append(d)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
try:
d={}
print "Mean "
vals = np.reshape(self.grid.mean_output, -1)
resp1 = compare_serv('mean', 0, shapeo[0], shapeo[1], vals.tolist())
#print self.grid.mean_output
d['name']= 'mean'
d['type']= 'mean'
d['errors']={}
d['errors']['error']=resp1.error
d['errors']['mse']=resp1.mse
d['errors']['std']=resp1.std
d['errors']['var']=resp1.var
#print resp1
error_chain.append(d)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return error_chain
def signal_handler(self, signal, frame):
self.running = False
print('You pressed Ctrl+C!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--cell_size", type=int, default=10,
help="cell size in meters")
parser.add_argument("--initial_percent", type=float, default=0.05,
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--limits_file", type=str, default='limits.coords',
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--initial_waypoint", type=str, default='WayPoint498',
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--area_coverage_type", type=str, default='area_split',
help="Type of area coverage, random or area_split")
parser.add_argument("--experiment_name", type=str, default='exp1',
help="Experiment ID")
args = parser.parse_args()
rospy.init_node('kriging_exploration')
#Explorator(53.261685, -0.527158, 16, 640, args.cell_size)
#Explorator(53.267213, -0.533420, 17, 640, args) #Football Field
Explorator(53.261576, -0.526648, 17, 640, args) #Half cosmos field
#Explorator(53.261685, -0.525158, 17, 640, args) #COSMOS Field
| mit |
rabipanda/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
      frac_true: Number in (0, 1). Expected fraction of resultant labels that
        will be True. This is only in expectation; more or fewer may actually
        be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
    frac_true: Number in (0, 1). Expected fraction of resultant labels that
      will be True. This is only in expectation; more or fewer may actually be
      True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
  # also checked this against sklearn.metrics.roc_auc_score.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
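  # For example, desired_auc = 0.8 gives x = 2 * 0.8 - 1 = 0.6, so True scores
  # are drawn from U[0.6, 1]; then AUC = 1 * 0.6 + 0.5 * (1 - 0.6) = 0.8.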
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
test.main()
| apache-2.0 |
sharthee/ProgrammingAssignment2 | labs/lab2/cs109style.py | 38 | 1293 | from __future__ import print_function
from IPython.core.display import HTML
from matplotlib import rcParams
#colorbrewer2 Dark2 qualitative color table
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
(0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
(0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
(0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
(0.4, 0.6509803921568628, 0.11764705882352941),
(0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
(0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
(0.4, 0.4, 0.4)]
def customize_mpl():
"""Tweak matplotlib visual style"""
print("Setting custom matplotlib visual style")
rcParams['figure.figsize'] = (10, 6)
rcParams['figure.dpi'] = 150
rcParams['axes.color_cycle'] = dark2_colors
rcParams['lines.linewidth'] = 2
rcParams['axes.grid'] = True
rcParams['axes.facecolor'] = '#eeeeee'
rcParams['font.size'] = 14
rcParams['patch.edgecolor'] = 'none'
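# Illustrative usage sketch (assumes this module is importable from a notebook
# as `cs109style`; nothing below runs automatically on import):
#
#     from cs109style import customize_mpl, customize_css
#     customize_mpl()     # apply the rcParams tweaks above
#     customize_css()     # returns an HTML object that restyles the notebook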
def customize_css():
print("Setting custom CSS for the IPython Notebook")
styles = open('custom.css', 'r').read()
return HTML(styles)
| mit |
rabernat/xrft | setup.py | 1 | 1391 | import os
import versioneer
from setuptools import setup, find_packages
PACKAGES = find_packages()
DISTNAME = 'xrft'
LICENSE = 'MIT'
AUTHOR = 'xrft Developers'
AUTHOR_EMAIL = 'takaya@ldeo.columbia.edu'
URL = 'https://github.com/xgcm/xrft'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
    'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
]
INSTALL_REQUIRES = ['xarray', 'dask', 'numpy', 'pandas', 'scipy']
EXTRAS_REQUIRE = ['cftime']
SETUP_REQUIRES = ['pytest-runner']
TESTS_REQUIRE = ['pytest >= 2.8', 'coverage']
DESCRIPTION = "Discrete Fourier Transform with xarray"
def readme():
with open('README.rst') as f:
return f.read()
setup(name=DISTNAME,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=readme(),
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
tests_require=TESTS_REQUIRE,
url=URL,
packages=find_packages())
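# Typical local workflow (illustrative only, not part of the package metadata):
#
#     pip install -e .          # editable install for development
#     python setup.py sdist     # build a source distribution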
| mit |
enigmampc/catalyst | catalyst/data/continuous_future_reader.py | 1 | 12198 | import numpy as np
import pandas as pd
from catalyst.data.session_bars import SessionBarReader
class ContinuousFutureSessionBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
        columns : list of str
            'open', 'high', 'low', 'close', 'volume', or 'sid'
        start_date : Timestamp
            Beginning of the window range.
        end_date : Timestamp
            End of the window range.
        assets : list of ContinuousFuture
            The continuous future assets in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol, start_date, end_date, asset.offset)
num_sessions = len(
self.trading_calendar.sessions_in_range(start_date, end_date))
shape = num_sessions, len(assets)
results = []
tc = self._bar_reader.trading_calendar
sessions = tc.sessions_in_range(start_date, end_date)
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = sessions.get_loc(start)
if roll_date is not None:
end = roll_date - sessions.freq
end_loc = sessions.get_loc(end)
else:
end = end_date
end_loc = len(sessions) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start = sessions[end_loc + 1]
for column in columns:
if column != 'volume' and column != 'sid':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.int64)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the catalyst.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
The OHLVC name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading_calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : catalyst.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
            All session labels (unioning the range for all assets) which the
reader can provide.
"""
return self._bar_reader.sessions
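# Illustrative usage sketch. `daily_bar_reader`, `roll_finders`, the
# ContinuousFuture asset `cf`, and the Timestamps `start_date`/`end_date` are
# assumed to be constructed elsewhere:
#
#     reader = ContinuousFutureSessionBarReader(daily_bar_reader, roll_finders)
#     closes, volumes = reader.load_raw_arrays(
#         ['close', 'volume'], start_date, end_date, [cf])
#     last_close = reader.get_value(cf, end_date, 'close')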
class ContinuousFutureMinuteBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
        columns : list of str
            'open', 'high', 'low', 'close', 'volume', or 'sid'
        start_date : Timestamp
            Beginning of the window range.
        end_date : Timestamp
            End of the window range.
        assets : list of ContinuousFuture
            The continuous future assets in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
tc = self.trading_calendar
start_session = tc.minute_to_session_label(start_date)
end_session = tc.minute_to_session_label(end_date)
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol,
start_session,
end_session, asset.offset)
sessions = tc.sessions_in_range(start_date, end_date)
minutes = tc.minutes_in_range(start_date, end_date)
num_minutes = len(minutes)
shape = num_minutes, len(assets)
results = []
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = minutes.searchsorted(start)
if roll_date is not None:
_, end = tc.open_and_close_for_session(
roll_date - sessions.freq)
end_loc = minutes.searchsorted(end)
else:
end = end_date
end_loc = len(minutes) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start, _ = tc.open_and_close_for_session(
tc.minute_to_session_label(minutes[end_loc + 1]))
for column in columns:
if column != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the catalyst.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
The OHLVC name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading_calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : catalyst.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
return self._bar_reader.sessions
| apache-2.0 |
CIFASIS/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py | 39 | 5044 | """
WRITEME
"""
import logging
from ..linear import LinearTransform
from .unshared_conv import FilterActs, ImgActs
from theano.compat.six.moves import xrange
from theano.sandbox import cuda
if cuda.cuda_available:
import gpu_unshared_conv # register optimizations
import numpy as np
import warnings
try:
import matplotlib.pyplot as plt
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
logger = logging.getLogger(__name__)
class LocalDot(LinearTransform):
"""
    LocalDot is a linear operation computationally similar to
    convolution in the spatial domain, except that whereas convolution
    applies a single filter or set of filters across an image, the
    LocalDot has different filterbanks for different points in the image.
Mathematically, this is a general linear transform except for a
restriction that filters are 0 outside of a spatially localized patch
within the image.
Image shape is 5-tuple:
color_groups
colors_per_group
rows
cols
images
Filterbank shape is 7-tuple (!)
0 row_positions
1 col_positions
2 colors_per_group
3 height
4 width
5 color_groups
6 filters_per_group
    The result of left-multiplication is a 5-tuple with shape:
filter_groups
filters_per_group
row_positions
col_positions
images
Parameters
----------
filters : WRITEME
irows : WRITEME
Image rows
icols : WRITEME
Image columns
subsample : WRITEME
padding_start : WRITEME
filters_shape : WRITEME
message : WRITEME
"""
def __init__(self, filters, irows, icols=None,
subsample=(1, 1),
padding_start=None,
filters_shape=None,
message=""):
LinearTransform.__init__(self, [filters])
self._filters = filters
if filters_shape is None:
self._filters_shape = tuple(filters.get_value(borrow=True).shape)
else:
self._filters_shape = tuple(filters_shape)
self._irows = irows
if icols is None:
self._icols = irows
else:
self._icols = icols
if self._icols != self._irows:
raise NotImplementedError('GPU code at least needs square imgs')
self._subsample = tuple(subsample)
self._padding_start = padding_start
if len(self._filters_shape) != 7:
raise TypeError('need 7-tuple filter shape', self._filters_shape)
if self._subsample[0] != self._subsample[1]:
raise ValueError('subsampling must be same in rows and cols')
self._filter_acts = FilterActs(self._subsample[0])
self._img_acts = ImgActs(module_stride=self._subsample[0])
if message:
self._message = message
else:
self._message = filters.name
def rmul(self, x):
"""
.. todo::
WRITEME
"""
assert x.ndim == 5
return self._filter_acts(x, self._filters)
def rmul_T(self, x):
"""
.. todo::
WRITEME
"""
return self._img_acts(self._filters, x, self._irows, self._icols)
def col_shape(self):
"""
.. todo::
WRITEME
"""
ishape = self.row_shape() + (-99,)
fshape = self._filters_shape
hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))
assert hshape[-1] == -99
return hshape[:-1]
def row_shape(self):
"""
.. todo::
WRITEME
"""
fshape = self._filters_shape
fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]
fgroups, filters_per_group = fshape[-2:]
return fgroups, fcolors, self._irows, self._icols
def print_status(self):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("TODO: fix dependence on non-existent "
"ndarray_status function")
"""print ndarray_status(
self._filters.get_value(borrow=True),
msg='%s{%s}'% (self.__class__.__name__,
self._message))
"""
def imshow_gray(self):
"""
.. todo::
WRITEME
"""
filters = self._filters.get_value()
modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape
logger.info(filters.shape)
rval = np.zeros((
modR * (rows + 1) - 1,
modC * (cols + 1) - 1,
))
for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]
plt.imshow(rval, cmap='gray')
return rval
| bsd-3-clause |
dingocuster/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter5/fig_likelihood_cauchy.py | 3 | 3219 | """
Log-likelihood for Cauchy Distribution
--------------------------------------
Figure 5.10
An illustration of the logarithm of posterior probability distribution for
:math:`\mu` and :math:`\gamma`, :math:`L(\mu,\gamma)` (see eq. 5.75) for
N = 10 (the sample is generated using the Cauchy distribution with
:math:`\mu = 0` and :math:`\gamma = 2`). The maximum of L is renormalized
to 0, and color coded as shown in the legend. The contours enclose the regions
that contain 0.683, 0.955 and 0.997 of the cumulative (integrated) posterior
probability.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import cauchy
from astroML.plotting.mcmc import convert_to_stdev
from astroML.stats import median_sigmaG
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def cauchy_logL(xi, gamma, mu):
"""Equation 5.74: cauchy likelihood"""
xi = np.asarray(xi)
n = xi.size
shape = np.broadcast(gamma, mu).shape
xi = xi.reshape(xi.shape + tuple([1 for s in shape]))
return ((n - 1) * np.log(gamma)
- np.sum(np.log(gamma ** 2 + (xi - mu) ** 2), 0))
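# Illustrative call: for scalar arguments, e.g. cauchy_logL(xi, 2.0, 0.0), the
# result is a scalar; passing gamma[:, np.newaxis] and mu (as done below)
# broadcasts to a (len(gamma), len(mu)) grid of log-likelihood values.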
#------------------------------------------------------------
# Define the grid and compute logL
gamma = np.linspace(0.1, 5, 70)
mu = np.linspace(-5, 5, 70)
np.random.seed(44)
mu0 = 0
gamma0 = 2
xi = cauchy(mu0, gamma0).rvs(10)
logL = cauchy_logL(xi, gamma[:, np.newaxis], mu)
logL -= logL.max()
#------------------------------------------------------------
# Find the max and print some information
i, j = np.where(logL >= np.max(logL))
print("mu from likelihood:", mu[j])
print("gamma from likelihood:", gamma[i])
print()
med, sigG = median_sigmaG(xi)
print("mu from median", med)
print("gamma from quartiles:", sigG / 1.483) # Equation 3.54
print()
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
plt.imshow(logL, origin='lower', cmap=plt.cm.binary,
extent=(mu[0], mu[-1], gamma[0], gamma[-1]),
aspect='auto')
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)
plt.contour(mu, gamma, convert_to_stdev(logL),
levels=(0.683, 0.955, 0.997),
colors='k')
plt.text(0.5, 0.93,
r'$L(\mu,\gamma)\ \mathrm{for}\ \bar{x}=0,\ \gamma=2,\ n=10$',
bbox=dict(ec='k', fc='w', alpha=0.9),
ha='center', va='center', transform=plt.gca().transAxes)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\gamma$')
plt.show()
| bsd-2-clause |
AllenDowney/SoftwareSystems | hw04/wave3/thinkdsp.py | 23 | 31996 | """This file contains code used in "Think DSP",
by Allen B. Downey, available from greenteapress.com
Copyright 2013 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import array
import math
import numpy
import random
import scipy
import scipy.fftpack
import scipy.integrate
import scipy.stats
import struct
import subprocess
import thinkplot
from fractions import gcd
from wave import open as open_wave
import matplotlib.pyplot as pyplot
PI2 = math.pi * 2
def random_seed(x):
"""Initialize the random and numpy.random generators.
x: int seed
"""
random.seed(x)
numpy.random.seed(x)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class WavFileWriter(object):
"""Writes wav files."""
def __init__(self, filename='sound.wav', framerate=11025):
"""Opens the file and sets parameters.
filename: string
framerate: samples per second
"""
self.filename = filename
self.framerate = framerate
self.nchannels = 1
self.sampwidth = 2
self.bits = self.sampwidth * 8
self.bound = 2**(self.bits-1) - 1
self.fmt = 'h'
self.dtype = numpy.int16
self.fp = open_wave(self.filename, 'w')
self.fp.setnchannels(self.nchannels)
self.fp.setsampwidth(self.sampwidth)
self.fp.setframerate(self.framerate)
def write(self, wave):
"""Writes a wave.
wave: Wave
"""
zs = wave.quantize(self.bound, self.dtype)
self.fp.writeframes(zs.tostring())
def close(self, duration=0):
"""Closes the file.
duration: how many seconds of silence to append
"""
if duration:
self.write(rest(duration))
self.fp.close()
def read_wave(filename='sound.wav'):
"""Reads a wave file.
filename: string
returns: Wave
"""
fp = open_wave(filename, 'r')
nchannels = fp.getnchannels()
nframes = fp.getnframes()
sampwidth = fp.getsampwidth()
framerate = fp.getframerate()
z_str = fp.readframes(nframes)
fp.close()
dtype_map = {1:numpy.int8, 2:numpy.int16}
assert sampwidth in dtype_map
ys = numpy.fromstring(z_str, dtype=dtype_map[sampwidth])
wave = Wave(ys, framerate)
return wave
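# Illustrative usage sketch (assumes a 16-bit mono WAV file on disk):
#
#     wave = read_wave('sound.wav')
#     wave.normalize()
#     spectrum = wave.make_spectrum()
#     spectrum.plot()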
def play_wave(filename='sound.wav', player='aplay'):
"""Plays a wave file.
filename: string
player: string name of executable that plays wav files
"""
cmd = '%s %s' % (player, filename)
popen = subprocess.Popen(cmd, shell=True)
popen.communicate()
class _SpectrumParent(object):
"""Contains code common to Spectrum and DCT.
"""
@property
def max_freq(self):
return self.framerate / 2.0
@property
def freq_res(self):
return self.max_freq / (len(self.fs) - 1)
def plot(self, low=0, high=None, **options):
"""Plots amplitude vs frequency.
low: int index to start at
high: int index to end at
"""
thinkplot.plot(self.fs[low:high], self.amps[low:high], **options)
def plot_power(self, low=0, high=None, **options):
"""Plots power vs frequency.
low: int index to start at
high: int index to end at
"""
thinkplot.plot(self.fs[low:high], self.power[low:high], **options)
def estimate_slope(self):
"""Runs linear regression on log power vs log frequency.
returns: slope, inter, r2, p, stderr
"""
x = numpy.log(self.fs[1:])
y = numpy.log(self.power[1:])
t = scipy.stats.linregress(x,y)
return t
def peaks(self):
"""Finds the highest peaks and their frequencies.
returns: sorted list of (amplitude, frequency) pairs
"""
t = zip(self.amps, self.fs)
t.sort(reverse=True)
return t
class Spectrum(_SpectrumParent):
"""Represents the spectrum of a signal."""
def __init__(self, hs, framerate):
self.hs = hs
self.framerate = framerate
n = len(hs)
self.fs = numpy.linspace(0, self.max_freq, n)
def __add__(self, other):
if other == 0:
return self
assert self.framerate == other.framerate
hs = self.hs + other.hs
return Spectrum(hs, self.framerate)
__radd__ = __add__
@property
def real(self):
"""Returns the real part of the hs (read-only property)."""
return numpy.real(self.hs)
@property
def imag(self):
"""Returns the imaginary part of the hs (read-only property)."""
return numpy.imag(self.hs)
@property
def amps(self):
"""Returns a sequence of amplitudes (read-only property)."""
return numpy.absolute(self.hs)
@property
def power(self):
"""Returns a sequence of powers (read-only property)."""
return self.amps ** 2
def low_pass(self, cutoff, factor=0):
"""Attenuate frequencies above the cutoff.
cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if self.fs[i] > cutoff:
self.hs[i] *= factor
def high_pass(self, cutoff, factor=0):
"""Attenuate frequencies below the cutoff.
cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if self.fs[i] < cutoff:
self.hs[i] *= factor
def band_stop(self, low_cutoff, high_cutoff, factor=0):
"""Attenuate frequencies between the cutoffs.
low_cutoff: frequency in Hz
high_cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if low_cutoff < self.fs[i] < high_cutoff:
self.hs[i] = 0
def pink_filter(self, beta=1):
"""Apply a filter that would make white noise pink.
beta: exponent of the pink noise
"""
denom = self.fs ** (beta/2.0)
denom[0] = 1
self.hs /= denom
def angles(self, i):
"""Computes phase angles in radians.
returns: list of phase angles
"""
return numpy.angle(self.hs)
def make_integrated_spectrum(self):
"""Makes an integrated spectrum.
"""
cs = numpy.cumsum(self.power)
cs /= cs[-1]
return IntegratedSpectrum(cs, self.fs)
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = numpy.fft.irfft(self.hs)
return Wave(ys, self.framerate)
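# Illustrative round trip (sketch), starting from any Wave object `wave`:
#
#     spectrum = wave.make_spectrum()
#     spectrum.low_pass(cutoff=600, factor=0.01)   # attenuate above 600 Hz
#     filtered = spectrum.make_wave()
#
# low_pass, high_pass and band_stop modify the spectrum in place; make_wave
# then transforms the filtered spectrum back to the time domain.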
class IntegratedSpectrum(object):
"""Represents the integral of a spectrum."""
def __init__(self, cs, fs):
"""Initializes an integrated spectrum:
cs: sequence of cumulative amplitudes
fs: sequence of frequences
"""
self.cs = cs
self.fs = fs
def plot_power(self, low=0, high=None, expo=False, **options):
"""Plots the integrated spectrum.
low: int index to start at
high: int index to end at
"""
cs = self.cs[low:high]
fs = self.fs[low:high]
if expo:
cs = numpy.exp(cs)
thinkplot.Plot(fs, cs, **options)
def estimate_slope(self, low=1, high=-12000):
"""Runs linear regression on log cumulative power vs log frequency.
returns: slope, inter, r2, p, stderr
"""
#print self.fs[low:high]
#print self.cs[low:high]
x = numpy.log(self.fs[low:high])
y = numpy.log(self.cs[low:high])
t = scipy.stats.linregress(x,y)
return t
class Dct(_SpectrumParent):
"""Represents the spectrum of a signal."""
def __init__(self, amps, framerate):
self.amps = amps
self.framerate = framerate
n = len(amps)
self.fs = numpy.arange(n) / float(n) * self.max_freq
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = scipy.fftpack.dct(self.amps, type=3) / 2
return Wave(ys, self.framerate)
class Spectrogram(object):
"""Represents the spectrum of a signal."""
def __init__(self, spec_map, seg_length, window_func=None):
"""Initialize the spectrogram.
spec_map: map from float time to Spectrum
seg_length: number of samples in each segment
window_func: function that computes the window
"""
self.spec_map = spec_map
self.seg_length = seg_length
self.window_func = window_func
def any_spectrum(self):
"""Returns an arbitrary spectrum from the spectrogram."""
return self.spec_map.itervalues().next()
@property
def time_res(self):
"""Time resolution in seconds."""
spectrum = self.any_spectrum()
return float(self.seg_length) / spectrum.framerate
@property
def freq_res(self):
"""Frequency resolution in Hz."""
return self.any_spectrum().freq_res
def times(self):
"""Sorted sequence of times.
returns: sequence of float times in seconds
"""
ts = sorted(self.spec_map.iterkeys())
return ts
def frequencies(self):
"""Sequence of frequencies.
returns: sequence of float freqencies in Hz.
"""
fs = self.any_spectrum().fs
return fs
def plot(self, low=0, high=None, **options):
"""Make a pseudocolor plot.
low: index of the lowest frequency component to plot
high: index of the highest frequency component to plot
"""
ts = self.times()
fs = self.frequencies()[low:high]
# make the array
size = len(fs), len(ts)
array = numpy.zeros(size, dtype=numpy.float)
# copy amplitude from each spectrum into a column of the array
for i, t in enumerate(ts):
spectrum = self.spec_map[t]
array[:,i] = spectrum.amps[low:high]
thinkplot.pcolor(ts, fs, array, **options)
def make_wave(self):
"""Inverts the spectrogram and returns a Wave.
returns: Wave
"""
res = []
for t, spectrum in sorted(self.spec_map.iteritems()):
wave = spectrum.make_wave()
n = len(wave)
if self.window_func:
window = 1 / self.window_func(n)
wave.window(window)
i = int(round(t * wave.framerate))
start = i - n / 2
end = start + n
res.append((start, end, wave))
starts, ends, waves = zip(*res)
low = min(starts)
high = max(ends)
ys = numpy.zeros(high-low, numpy.float)
for start, end, wave in res:
ys[start:end] = wave.ys
return Wave(ys, wave.framerate)
class Wave(object):
"""Represents a discrete-time waveform.
Note: the ys attribute is a "wave array" which is a numpy
array of floats.
"""
def __init__(self, ys, framerate, start=0):
"""Initializes the wave.
ys: wave array
framerate: samples per second
"""
self.ys = ys
self.framerate = framerate
self.start = start
def __len__(self):
return len(self.ys)
@property
def duration(self):
"""Duration (property).
returns: float duration in seconds
"""
return len(self.ys) / float(self.framerate)
def __or__(self, other):
"""Concatenates two waves.
other: Wave
returns: Wave
"""
if self.framerate != other.framerate:
raise ValueError('Wave.__or__: framerates do not agree')
ys = numpy.concatenate((self.ys, other.ys))
return Wave(ys, self.framerate)
def quantize(self, bound, dtype):
"""Maps the waveform to quanta.
bound: maximum amplitude
dtype: numpy data type or string
returns: quantized signal
"""
return quantize(self.ys, bound, dtype)
def apodize(self, denom=20, duration=0.1):
"""Tapers the amplitude at the beginning and end of the signal.
Tapers either the given duration of time or the given
fraction of the total duration, whichever is less.
denom: float fraction of the segment to taper
duration: float duration of the taper in seconds
"""
self.ys = apodize(self.ys, self.framerate, denom, duration)
def hamming(self):
"""Apply a Hamming window to the wave.
"""
self.ys *= numpy.hamming(len(self.ys))
def window(self, window):
"""Apply a window to the wave.
window: sequence of multipliers, same length as self.ys
"""
self.ys *= window
def normalize(self, amp=1.0):
"""Normalizes the signal to the given amplitude.
amp: float amplitude
"""
self.ys = normalize(self.ys, amp=amp)
def unbias(self):
"""Unbiases the signal.
"""
self.ys = unbias(self.ys)
def segment(self, start=0, duration=None):
"""Extracts a segment.
start: float start time in seconds
duration: float duration in seconds
returns: Wave
"""
i = start * self.framerate
if duration is None:
j = None
else:
j = i + duration * self.framerate
ys = self.ys[i:j]
return Wave(ys, self.framerate)
def make_spectrum(self):
"""Computes the spectrum using FFT.
returns: Spectrum
"""
hs = numpy.fft.rfft(self.ys)
return Spectrum(hs, self.framerate)
def make_dct(self):
amps = scipy.fftpack.dct(self.ys, type=2)
return Dct(amps, self.framerate)
def make_spectrogram(self, seg_length, window_func=numpy.hamming):
"""Computes the spectrogram of the wave.
seg_length: number of samples in each segment
window_func: function used to compute the window
returns: Spectrogram
"""
n = len(self.ys)
window = window_func(seg_length)
start, end, step = 0, seg_length, seg_length / 2
spec_map = {}
while end < n:
ys = self.ys[start:end] * window
hs = numpy.fft.rfft(ys)
t = (start + end) / 2.0 / self.framerate
spec_map[t] = Spectrum(hs, self.framerate)
start += step
end += step
return Spectrogram(spec_map, seg_length, window_func)
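    # Illustrative usage (sketch): seg_length is in samples, so at a framerate
    # of 11025 a 512-sample segment spans roughly 46 ms:
    #
    #     spectrogram = wave.make_spectrogram(seg_length=512)
    #     spectrogram.plot(high=100)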
def plot(self, **options):
"""Plots the wave.
"""
n = len(self.ys)
ts = numpy.linspace(0, self.duration, n)
thinkplot.plot(ts, self.ys, **options)
def corr(self, other):
"""Correlation coefficient two waves.
other: Wave
returns: 2x2 covariance matrix
"""
mat = self.cov_mat(other)
corr = mat[0][1] / math.sqrt(mat[0][0] * mat[1][1])
return corr
def cov_mat(self, other):
"""Covariance matrix of two waves.
other: Wave
returns: 2x2 covariance matrix
"""
return numpy.cov(self.ys, other.ys)
def cov(self, other):
"""Covariance of two unbiased waves.
other: Wave
returns: float
"""
total = sum(self.ys * other.ys) / len(self.ys)
return total
def cos_cov(self, k):
"""Covariance with a cosine signal.
freq: freq of the cosine signal in Hz
returns: float covariance
"""
n = len(self.ys)
factor = math.pi * k / n
ys = [math.cos(factor * (i+0.5)) for i in range(n)]
total = 2 * sum(self.ys * ys)
return total
def cos_transform(self):
"""Discrete cosine transform.
returns: list of frequency, cov pairs
"""
n = len(self.ys)
res = []
for k in range(n):
cov = self.cos_cov(k)
res.append((k, cov))
return res
def write(self, filename='sound.wav'):
"""Write a wave file.
filename: string
"""
print 'Writing', filename
wfile = WavFileWriter(filename, self.framerate)
wfile.write(self)
wfile.close()
def play(self, filename='sound.wav'):
"""Plays a wave file.
filename: string
"""
self.write(filename)
play_wave(filename)
def unbias(ys):
"""Shifts a wave array so it has mean 0.
ys: wave array
returns: wave array
"""
return ys - ys.mean()
def normalize(ys, amp=1.0):
"""Normalizes a wave array so the maximum amplitude is +amp or -amp.
ys: wave array
amp: max amplitude (pos or neg) in result
returns: wave array
"""
high, low = abs(max(ys)), abs(min(ys))
return amp * ys / max(high, low)
def quantize(ys, bound, dtype):
"""Maps the waveform to quanta.
ys: wave array
bound: maximum amplitude
dtype: numpy data type of the result
returns: quantized signal
"""
if max(ys) > 1 or min(ys) < -1:
print 'Warning: normalizing before quantizing.'
ys = normalize(ys)
zs = (ys * bound).astype(dtype)
return zs
def apodize(ys, framerate, denom=20, duration=0.1):
"""Tapers the amplitude at the beginning and end of the signal.
Tapers either the given duration of time or the given
fraction of the total duration, whichever is less.
ys: wave array
framerate: int frames per second
denom: float fraction of the segment to taper
duration: float duration of the taper in seconds
returns: wave array
"""
# a fixed fraction of the segment
n = len(ys)
k1 = n / denom
# a fixed duration of time
k2 = int(duration * framerate)
k = min(k1, k2)
w1 = numpy.linspace(0, 1, k)
w2 = numpy.ones(n - 2*k)
w3 = numpy.linspace(1, 0, k)
window = numpy.concatenate((w1, w2, w3))
return ys * window
class Signal(object):
"""Represents a time-varying signal."""
def __add__(self, other):
"""Adds two signals.
other: Signal
returns: Signal
"""
if other == 0:
return self
return SumSignal(self, other)
__radd__ = __add__
@property
def period(self):
"""Period of the signal in seconds (property).
For non-periodic signals, use the default, 0.1 seconds
returns: float seconds
"""
return 0.1
def plot(self, framerate=11025):
"""Plots the signal.
framerate: samples per second
"""
duration = self.period * 3
wave = self.make_wave(duration, start=0, framerate=framerate)
wave.plot()
def make_wave(self, duration=1, start=0, framerate=11025):
"""Makes a Wave object.
duration: float seconds
start: float seconds
framerate: int frames per second
returns: Wave
"""
dt = 1.0 / framerate
ts = numpy.arange(start, duration, dt)
ys = self.evaluate(ts)
return Wave(ys, framerate=framerate, start=start)
def infer_framerate(ts):
"""Given ts, find the framerate.
Assumes that the ts are equally spaced.
ts: sequence of times in seconds
returns: frames per second
"""
dt = ts[1] - ts[0]
framerate = 1.0 / dt
return framerate
class SumSignal(Signal):
"""Represents the sum of signals."""
def __init__(self, *args):
"""Initializes the sum.
args: tuple of signals
"""
self.signals = args
@property
def period(self):
"""Period of the signal in seconds.
        Note: this is not correct; it's mostly a placeholder.
But it is correct for a harmonic sequence where all
component frequencies are multiples of the fundamental.
returns: float seconds
"""
return max(sig.period for sig in self.signals)
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
return sum(sig.evaluate(ts) for sig in self.signals)
class Sinusoid(Signal):
"""Represents a sinusoidal signal."""
def __init__(self, freq=440, amp=1.0, offset=0, func=numpy.sin):
"""Initializes a sinusoidal signal.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
func: function that maps phase to amplitude
"""
self.freq = freq
self.amp = amp
self.offset = offset
self.func = func
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
return 1.0 / self.freq
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
phases = PI2 * self.freq * ts + self.offset
ys = self.amp * self.func(phases)
return ys
def CosSignal(freq=440, amp=1.0, offset=0):
"""Makes a consine Sinusoid.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
returns: Sinusoid object
"""
return Sinusoid(freq, amp, offset, func=numpy.cos)
def SinSignal(freq=440, amp=1.0, offset=0):
"""Makes a sine Sinusoid.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
returns: Sinusoid object
"""
return Sinusoid(freq, amp, offset, func=numpy.sin)
class SquareSignal(Sinusoid):
"""Represents a square signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = self.amp * numpy.sign(unbias(frac))
return ys
class SawtoothSignal(Sinusoid):
"""Represents a sawtooth signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = normalize(unbias(frac), self.amp)
return ys
class ParabolicSignal(Sinusoid):
"""Represents a parabolic signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = frac**2
ys = normalize(unbias(ys), self.amp)
return ys
class GlottalSignal(Sinusoid):
"""Represents a periodic signal that resembles a glottal signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = frac**4 * (1-frac)
ys = normalize(unbias(ys), self.amp)
return ys
class TriangleSignal(Sinusoid):
"""Represents a triangle signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = numpy.abs(frac - 0.5)
ys = normalize(unbias(ys), self.amp)
return ys
class Chirp(Signal):
"""Represents a signal with variable frequency."""
def __init__(self, start=440, end=880, amp=1.0):
"""Initializes a linear chirp.
start: float frequency in Hz
end: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
"""
self.start = start
self.end = end
self.amp = amp
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
        raise ValueError('Non-periodic signal.')
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
freqs = numpy.linspace(self.start, self.end, len(ts)-1)
return self._evaluate(ts, freqs)
def _evaluate(self, ts, freqs):
"""Helper function that evaluates the signal.
ts: float array of times
freqs: float array of frequencies during each interval
"""
#n = len(freqs)
#print freqs[::n/2]
dts = numpy.diff(ts)
dps = PI2 * freqs * dts
phases = numpy.cumsum(dps)
phases = numpy.insert(phases, 0, 0)
ys = self.amp * numpy.cos(phases)
return ys
class ExpoChirp(Chirp):
"""Represents a signal with varying frequency."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
start, end = math.log10(self.start), math.log10(self.end)
freqs = numpy.logspace(start, end, len(ts)-1)
return self._evaluate(ts, freqs)
class SilentSignal(Signal):
"""Represents silence."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
return numpy.zeros(len(ts))
class _Noise(Signal):
"""Represents a noise signal (abstract parent class)."""
def __init__(self, amp=1.0):
"""Initializes a white noise signal.
amp: float amplitude, 1.0 is nominal max
"""
self.amp = amp
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
        raise ValueError('Non-periodic signal.')
class UncorrelatedUniformNoise(_Noise):
"""Represents uncorrelated uniform noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
ys = numpy.random.uniform(-self.amp, self.amp, len(ts))
return ys
class UncorrelatedGaussianNoise(_Noise):
"""Represents uncorrelated gaussian noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
ys = numpy.random.normal(0, 1, len(ts))
ys = normalize(ys, self.amp)
return ys
class BrownianNoise(_Noise):
"""Represents Brownian noise, aka red noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
Computes Brownian noise by taking the cumulative sum of
a uniform random series.
ts: float array of times
returns: float wave array
"""
#dys = numpy.random.normal(0, 1, len(ts))
dys = numpy.random.uniform(-1, 1, len(ts))
#ys = numpy.cumsum(dys)
ys = scipy.integrate.cumtrapz(dys, ts)
ys = normalize(unbias(ys), self.amp)
return ys
class PinkNoise(_Noise):
"""Represents Brownian noise, aka red noise."""
def __init__(self, amp=1.0, beta=1.0):
"""Initializes a pink noise signal.
amp: float amplitude, 1.0 is nominal max
"""
self.amp = amp
self.beta = beta
def make_wave(self, duration=1, start=0, framerate=11025):
"""Makes a Wave object.
duration: float seconds
start: float seconds
framerate: int frames per second
returns: Wave
"""
signal = UncorrelatedUniformNoise()
wave = signal.make_wave(duration, start, framerate)
spectrum = wave.make_spectrum()
spectrum.pink_filter(beta=self.beta)
wave2 = spectrum.make_wave()
wave2.unbias()
wave2.normalize(self.amp)
return wave2
def rest(duration):
"""Makes a rest of the given duration.
duration: float seconds
returns: Wave
"""
signal = SilentSignal()
wave = signal.make_wave(duration)
return wave
def make_note(midi_num, duration, sig_cons=CosSignal, framerate=11025):
"""Make a MIDI note with the given duration.
midi_num: int MIDI note number
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freq = midi_to_freq(midi_num)
signal = sig_cons(freq)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave
def make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025):
"""Make a chord with the given duration.
midi_nums: sequence of int MIDI note numbers
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freqs = [midi_to_freq(num) for num in midi_nums]
signal = sum(sig_cons(freq) for freq in freqs)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave
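# For example (sketch), a one-second C major triad from MIDI notes 60, 64, 67:
#
#     chord = make_chord([60, 64, 67], duration=1)
#     chord.write('chord.wav')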
def midi_to_freq(midi_num):
"""Converts MIDI note number to frequency.
midi_num: int MIDI note number
returns: float frequency in Hz
"""
x = (midi_num - 69) / 12.0
freq = 440.0 * 2**x
return freq
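# For example, midi_to_freq(69) returns 440.0 (concert A), and midi_to_freq(60)
# returns about 261.63 Hz (middle C), since 440 * 2 ** (-9 / 12.0) ~= 261.626.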
def sin_wave(freq, duration=1, offset=0):
"""Makes a sine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = SinSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave
def cos_wave(freq, duration=1, offset=0):
"""Makes a cosine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = CosSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave
def mag(a):
"""Computes the magnitude of a numpy array.
a: numpy array
returns: float
"""
return numpy.sqrt(numpy.dot(a, a))
def main():
cos_basis = cos_wave(440)
sin_basis = sin_wave(440)
wave = cos_wave(440, offset=math.pi/2)
cos_cov = cos_basis.cov(wave)
sin_cov = sin_basis.cov(wave)
print cos_cov, sin_cov, mag((cos_cov, sin_cov))
return
wfile = WavFileWriter()
for sig_cons in [SinSignal, TriangleSignal, SawtoothSignal,
GlottalSignal, ParabolicSignal, SquareSignal]:
print sig_cons
sig = sig_cons(440)
wave = sig.make_wave(1)
wave.apodize()
wfile.write(wave)
wfile.close()
return
signal = GlottalSignal(440)
signal.plot()
pyplot.show()
return
wfile = WavFileWriter()
for m in range(60, 0, -1):
wfile.write(make_note(m, 0.25))
wfile.close()
return
wave1 = make_note(69, 1)
wave2 = make_chord([69, 72, 76], 1)
wave = wave1 | wave2
wfile = WavFileWriter()
wfile.write(wave)
wfile.close()
return
sig1 = CosSignal(freq=440)
sig2 = CosSignal(freq=523.25)
sig3 = CosSignal(freq=660)
sig4 = CosSignal(freq=880)
sig5 = CosSignal(freq=987)
sig = sig1 + sig2 + sig3 + sig4
#wave = Wave(sig, duration=0.02)
#wave.plot()
wave = sig.make_wave(duration=1)
#wave.normalize()
wfile = WavFileWriter(wave)
wfile.write()
wfile.close()
if __name__ == '__main__':
main()
| gpl-3.0 |
dusenberrymw/systemml | src/main/python/systemml/converters.py | 8 | 12296 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convert_caffemodel', 'convert_lmdb_to_jpeg', 'convertToNumPyArr', 'convertToPandasDF', 'SUPPORTED_TYPES' , 'convertToLabeledDF', 'convertImageToNumPyArr', 'getDatasetMean']
import numpy as np
import pandas as pd
import os
import math
from pyspark.context import SparkContext
from scipy.sparse import coo_matrix, spmatrix, csr_matrix
from .classloader import *
SUPPORTED_TYPES = (np.ndarray, pd.DataFrame, spmatrix)
DATASET_MEAN = {'VGG_ILSVRC_19_2014':[103.939, 116.779, 123.68]}
def getNumCols(numPyArr):
if numPyArr.ndim == 1:
return 1
else:
return numPyArr.shape[1]
def get_pretty_str(key, value):
return '\t"' + key + '": ' + str(value) + ',\n'
def save_tensor_csv(tensor, file_path, shouldTranspose):
    w = tensor.reshape(tensor.shape[0], -1)
if shouldTranspose:
w = w.T
np.savetxt(file_path, w, delimiter=',')
with open(file_path + '.mtd', 'w') as file:
file.write('{\n\t"data_type": "matrix",\n\t"value_type": "double",\n')
file.write(get_pretty_str('rows', w.shape[0]))
file.write(get_pretty_str('cols', w.shape[1]))
file.write(get_pretty_str('nnz', np.count_nonzero(w)))
file.write('\t"format": "csv",\n\t"description": {\n\t\t"author": "SystemML"\n\t}\n}\n')
def convert_caffemodel(sc, deploy_file, caffemodel_file, output_dir, format="binary", is_caffe_installed=False):
"""
Saves the weights and bias in the caffemodel file to output_dir in the specified format.
This method does not requires caffe to be installed.
Parameters
----------
sc: SparkContext
SparkContext
deploy_file: string
Path to the input network file
caffemodel_file: string
Path to the input caffemodel file
output_dir: string
Path to the output directory
format: string
Format of the weights and bias (can be binary, csv or text)
is_caffe_installed: bool
True if caffe is installed
"""
if is_caffe_installed:
if format != 'csv':
raise ValueError('The format ' + str(format) + ' is not supported when caffe is installed. Hint: Please specify format=csv')
import caffe
net = caffe.Net(deploy_file, caffemodel_file, caffe.TEST)
for layerName in net.params.keys():
num_parameters = len(net.params[layerName])
if num_parameters == 0:
continue
elif num_parameters == 2:
# Weights and Biases
layerType = net.layers[list(net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(net.params[layerName][0].data, os.path.join(output_dir, layerName + '_weight.mtx'), shouldTranspose)
save_tensor_csv(net.params[layerName][1].data, os.path.join(output_dir, layerName + '_bias.mtx'), shouldTranspose)
elif num_parameters == 1:
# Only Weight
layerType = net.layers[list(net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(net.params[layerName][0].data, os.path.join(output_dir, layerName + '_weight.mtx'), shouldTranspose)
else:
raise ValueError('Unsupported number of parameters:' + str(num_parameters))
else:
createJavaObject(sc, 'dummy')
utilObj = sc._jvm.org.apache.sysml.api.dl.Utils()
utilObj.saveCaffeModelFile(sc._jsc, deploy_file, caffemodel_file, output_dir, format)
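# Example usage (illustrative; the paths below are placeholders):
# convert_caffemodel(sc, deploy_file='deploy.prototxt',
#                    caffemodel_file='model.caffemodel',
#                    output_dir='/tmp/model_weights', format='binary')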
def convert_lmdb_to_jpeg(lmdb_img_file, output_dir):
"""
Saves the images in the lmdb file as jpeg in the output_dir. This method requires caffe to be installed along with lmdb and cv2 package.
To install cv2 package, do `pip install opencv-python`.
Parameters
----------
lmdb_img_file: string
Path to the input lmdb file
output_dir: string
Output directory for images (local filesystem)
"""
import lmdb, caffe, cv2
    lmdb_cursor = lmdb.open(lmdb_img_file, readonly=True).begin().cursor()
datum = caffe.proto.caffe_pb2.Datum()
i = 1
for _, value in lmdb_cursor:
datum.ParseFromString(value)
data = caffe.io.datum_to_array(datum)
output_file_path = os.path.join(output_dir, 'file_' + str(i) + '.jpg')
image = np.transpose(data, (1,2,0)) # CxHxW to HxWxC in cv2
cv2.imwrite(output_file_path, image)
i = i + 1
def convertToLabeledDF(sparkSession, X, y=None):
from pyspark.ml.feature import VectorAssembler
if y is not None:
pd1 = pd.DataFrame(X)
pd2 = pd.DataFrame(y, columns=['label'])
pdf = pd.concat([pd1, pd2], axis=1)
inputColumns = ['C' + str(i) for i in pd1.columns]
outputColumns = inputColumns + ['label']
else:
pdf = pd.DataFrame(X)
inputColumns = ['C' + str(i) for i in pdf.columns]
outputColumns = inputColumns
assembler = VectorAssembler(inputCols=inputColumns, outputCol='features')
out = assembler.transform(sparkSession.createDataFrame(pdf, outputColumns))
if y is not None:
return out.select('features', 'label')
else:
return out.select('features')
def _convertSPMatrixToMB(sc, src):
src = coo_matrix(src, dtype=np.float64)
numRows = src.shape[0]
numCols = src.shape[1]
data = src.data
row = src.row.astype(np.int32)
col = src.col.astype(np.int32)
nnz = len(src.col)
buf1 = bytearray(data.tostring())
buf2 = bytearray(row.tostring())
buf3 = bytearray(col.tostring())
createJavaObject(sc, 'dummy')
return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertSciPyCOOToMB(buf1, buf2, buf3, numRows, numCols, nnz)
def _convertDenseMatrixToMB(sc, src):
numCols = getNumCols(src)
numRows = src.shape[0]
arr = src.ravel().astype(np.float64)
buf = bytearray(arr.tostring())
createJavaObject(sc, 'dummy')
return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertPy4JArrayToMB(buf, numRows, numCols)
def _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen):
rowIndex = int(i / numRowsPerBlock)
tmp = src[i:min(i+numRowsPerBlock, rlen),]
mb = _convertSPMatrixToMB(sc, tmp) if isinstance(src, spmatrix) else _convertDenseMatrixToMB(sc, tmp)
sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.copyRowBlocks(mb, rowIndex, ret, numRowsPerBlock, rlen, clen)
return i
def convertToMatrixBlock(sc, src, maxSizeBlockInMB=8):
if not isinstance(sc, SparkContext):
raise TypeError('sc needs to be of type SparkContext')
isSparse = True if isinstance(src, spmatrix) else False
src = np.asarray(src, dtype=np.float64) if not isSparse else src
if len(src.shape) != 2:
src_type = str(type(src).__name__)
raise TypeError('Expected 2-dimensional ' + src_type + ', instead passed ' + str(len(src.shape)) + '-dimensional ' + src_type)
# Ignoring sparsity for computing numRowsPerBlock for now
numRowsPerBlock = int(math.ceil((maxSizeBlockInMB*1000000) / (src.shape[1]*8)))
multiBlockTransfer = False if numRowsPerBlock >= src.shape[0] else True
if not multiBlockTransfer:
return _convertSPMatrixToMB(sc, src) if isSparse else _convertDenseMatrixToMB(sc, src)
else:
# Since coo_matrix does not have range indexing
src = csr_matrix(src) if isSparse else src
rlen = int(src.shape[0])
clen = int(src.shape[1])
ret = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.allocateDenseOrSparse(rlen, clen, isSparse)
[ _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen) for i in range(0, src.shape[0], numRowsPerBlock) ]
sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.postProcessAfterCopying(ret)
return ret
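# Worked example of the block-size arithmetic above (illustrative numbers only): with the
# default maxSizeBlockInMB=8, a dense float64 matrix with 1000 columns is transferred in
# row blocks of ceil(8 * 1000000 / (1000 * 8)) = 1000 rows per _copyRowBlock call; any
# matrix with fewer rows than that is sent in a single block.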
def convertToNumPyArr(sc, mb):
if isinstance(sc, SparkContext):
numRows = mb.getNumRows()
numCols = mb.getNumColumns()
createJavaObject(sc, 'dummy')
buf = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertMBtoPy4JDenseArr(mb)
return np.frombuffer(buf, count=numRows*numCols, dtype=np.float64).reshape((numRows, numCols))
else:
raise TypeError('sc needs to be of type SparkContext') # TODO: We can generalize this by creating py4j gateway ourselves
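# Minimal round-trip sketch for the two converters above (assumes an active SparkContext
# `sc`; `X` may be a 2-D NumPy array or a SciPy sparse matrix):
#
#   mb = convertToMatrixBlock(sc, X)     # Python-side matrix -> SystemML MatrixBlock
#   X_back = convertToNumPyArr(sc, mb)   # MatrixBlock -> dense NumPy array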
# Returns the mean of a model if defined otherwise None
def getDatasetMean(dataset_name):
"""
Parameters
----------
    dataset_name: Name of the dataset used to train the model. This is an artificial name based on the dataset the model was trained on.
    Returns
    -------
    mean: Mean value for the model if it is defined in DATASET_MEAN, else None.
"""
try:
mean = DATASET_MEAN[dataset_name.upper()]
except:
mean = None
return mean
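# Illustrative call (the dataset key below is hypothetical; valid keys are whatever is
# defined in DATASET_MEAN):
#
#   vgg_mean = getDatasetMean('VGG_ILSVRC_2012')   # per-channel mean, or None if unknown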
# Example usage: convertImageToNumPyArr(im, img_shape=(3, 224, 224), add_rotated_images=True, add_mirrored_images=True)
# The above call returns a numpy array of shape (6, 150528), i.e. the original, rotated and mirrored images flattened in NCHW format
def convertImageToNumPyArr(im, img_shape=None, add_rotated_images=False, add_mirrored_images=False,
color_mode = 'RGB', mean=None):
    ## Input Parameters
    # color_mode: VGG-style models expect image data in BGR channel order instead of the RGB
    #             order used by most other models; pass color_mode='BGR' for such models.
    # mean: per-channel mean that is subtracted from every pixel of the input data.
    #       Defaults to None, in which case no mean is subtracted.
if img_shape is not None:
num_channels = img_shape[0]
size = (img_shape[1], img_shape[2])
else:
num_channels = 1 if im.mode == 'L' else 3
size = None
if num_channels != 1 and num_channels != 3:
raise ValueError('Expected the number of channels to be either 1 or 3')
from PIL import Image
if size is not None:
im = im.resize(size, Image.LANCZOS)
expected_mode = 'L' if num_channels == 1 else 'RGB'
    if expected_mode != im.mode:
im = im.convert(expected_mode)
def _im2NumPy(im):
if expected_mode == 'L':
return np.asarray(im.getdata()).reshape((1, -1))
else:
im = (np.array(im).astype(np.float))
            # RGB -> BGR: reorder the channel axis while it is still the last axis
            if color_mode == 'BGR':
                im = im[:, :, ::-1]
            # Subtract the per-channel mean
            if mean is not None:
                for c in range(3):
                    im[:, :, c] = im[:, :, c] - mean[c]
            # (H,W,C) -> (C,H,W)
            im = im.transpose(2, 0, 1)
# (C,H,W) --> (1, C*H*W)
return im.reshape((1, -1))
ret = _im2NumPy(im)
if add_rotated_images:
ret = np.vstack((ret, _im2NumPy(im.rotate(90)), _im2NumPy(im.rotate(180)), _im2NumPy(im.rotate(270)) ))
if add_mirrored_images:
ret = np.vstack((ret, _im2NumPy(im.transpose(Image.FLIP_LEFT_RIGHT)), _im2NumPy(im.transpose(Image.FLIP_TOP_BOTTOM))))
return ret
def convertToPandasDF(X):
if not isinstance(X, pd.DataFrame):
return pd.DataFrame(X, columns=['C' + str(i) for i in range(getNumCols(X))])
return X
| apache-2.0 |
thilbern/scikit-learn | sklearn/neighbors/base.py | 7 | 25049 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
    ----------
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
    -------
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
            Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = check_array(X, accept_sparse='csr')
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
j = np.arange(neigh_ind.shape[0])[:, None]
neigh_ind = neigh_ind[j, np.argsort(dist[j, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
return result
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point or points
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the euclidean distances to each point,
only present if return_distance=True.
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = check_array(X, accept_sparse='csr')
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = [np.where(d < radius)[0] for d in dist]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.effective_metric_ == 'euclidean':
dist = np.array([np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]]
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
ind, dist = results
return dist, ind
else:
return results
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_neighbors = np.array([len(a) for a in A_ind])
n_nonzero = np.sum(n_neighbors)
if A_data is None:
A_data = np.ones(n_nonzero)
A_ind = np.concatenate(list(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
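# Note (illustrative, not part of this module): the public estimators in sklearn.neighbors
# are thin compositions of the classes above, e.g. roughly
#   class NearestNeighbors(NeighborsBase, KNeighborsMixin,
#                          RadiusNeighborsMixin, UnsupervisedMixin): ...
# so fit/kneighbors/radius_neighbors are all inherited from these mixins.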
| bsd-3-clause |
mjgrav2001/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 142 | 5990 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
    # Test that SparsePCA won't return NaN when a feature is zero in all
    # samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
Rickyfox/MLMA2 | core/DataHandler.py | 1 | 1770 | '''
Created on Dec 17, 2014
@author: Dominik Lang
'''
import csv
import os.path
from random import shuffle
import collections
import numpy
from sklearn.preprocessing import Imputer
class DataHandler(object):
def __init__(self):
pass
'''
@summary: A method to handle reading the data in from the csv file
@return: List containing the rows of the dataset as seperate lists
'''
def readData(self):
# We get the path to the current file, then go one directory up to find the data file
basepath = os.path.dirname(__file__)
filepath = os.path.abspath(os.path.join(basepath, "..","data.csv"))
output=[]
with open(filepath, 'rb') as csvfile:
i=0
linereader = csv.reader(csvfile, delimiter=',')
for row in linereader:
if i==0:
i+=1
continue
output.append(row)
return output
'''
@summary: A method that splits the dataset into a training and a test set
'''
def splitData(self,dataset):
sets = collections.namedtuple('Sets', ['train', 'test'])
third=len(dataset)/3
shuffle(dataset)
testset=dataset[0:third]
        trainset=dataset[third:]
s=sets(trainset,testset)
return s
def vectorizeData(self,dataset):
vectors = collections.namedtuple('vectors', ['X', 'Y'])
x=[]
y=[]
for i in dataset:
atts=i[0:-2]
c=i[-1]
x.append(atts)
y.append(c)
x=numpy.asarray(x)
y=numpy.asarray(y)
output=vectors(x,y)
return output
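# Illustrative end-to-end use of DataHandler (a sketch; it assumes data.csv exists one
# directory above this module, as readData() expects):
#
#   dh = DataHandler()
#   sets = dh.splitData(dh.readData())
#   train = dh.vectorizeData(sets.train)   # train.X, train.Y are numpy arrays
#   test = dh.vectorizeData(sets.test)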
| gpl-2.0 |
siutanwong/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
    # At least one point should be returned when the `radius` is set
    # to the mean distance from the query point to the other points in
    # the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
jjx02230808/project0223 | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
igabriel85/dmon-adp | adpformater/adpformater.py | 1 | 1615 | import pandas as pd
class DataFormatter():
def __init__(self, dataloc):
self.dataloc = dataloc
def aggJsonToCsv(self):
return "CSV file"
def expTimestamp(self):
return "Expand metric timestamp"
def window(self):
return "Window metrics"
def pivot(self):
return "Pivot values"
def addID(self):
return "Add new ID as index"
def removeID(self):
return "Remove selected column as index"
def renameHeader(self):
return "Rename headers"
def normalize(self):
return "Normalize data"
def denormalize(self):
return "Denormalize data"
input_table = pd.read_csv("metrics.csv")
for index, row in input_table.iterrows():
input_table = input_table.append([row]*9)
input_table = input_table.sort_values(['row ID'])
input_table = input_table.reset_index(drop=True)
for index, rows in input_table.iterrows():
if int(index) > 59:
print "Index to big!"
time = rows[0].split(", ", 1) #In Knime row for timestamp is row(55) last one
timeHour = time[1].split(":", 2)
timeHourSeconds = timeHour[2].split(".", 1)
timeHourSecondsDecimal = timeHour[2].split(".", 1)
timeHourSecondsDecimal[0] = str(index)
if len(timeHourSecondsDecimal[0]) == 1:
timeHourSecondsDecimal[0] = '0%s' %timeHourSecondsDecimal[0]
decimal = '.'.join(timeHourSecondsDecimal)
timeHour[2] = decimal
timenew = ':'.join(timeHour)
time[1] = timenew
finalString = ', '.join(time)
input_table.set_value(index, 'row ID', finalString)
input_table.to_csv('out.csv')
| apache-2.0 |
idlead/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
junbochen/pylearn2 | pylearn2/scripts/papers/jia_huang_wkshp_11/evaluate.py | 44 | 3208 | from __future__ import print_function
from optparse import OptionParser
import warnings
try:
from sklearn.metrics import classification_report
except ImportError:
classification_report = None
warnings.warn("couldn't find sklearn.metrics.classification_report")
try:
from sklearn.metrics import confusion_matrix
except ImportError:
confusion_matrix = None
warnings.warn("couldn't find sklearn.metrics.metrics.confusion_matrix")
from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
import numpy as np
def test(model, X, y):
print("Evaluating svm")
y_pred = model.predict(X)
#try:
if True:
acc = (y == y_pred).mean()
print("Accuracy ",acc)
"""except:
print("something went wrong")
print('y:')
print(y)
print('y_pred:')
print(y_pred)
print('extra info')
print(type(y))
print(type(y_pred))
print(y.dtype)
print(y_pred.dtype)
print(y.shape)
print(y_pred.shape)
raise
"""
#
def get_test_labels(cifar10, cifar100, stl10):
assert cifar10 + cifar100 + stl10 == 1
if stl10:
print('loading entire stl-10 test set just to get the labels')
stl10 = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/test.pkl")
return stl10.y
if cifar10:
print('loading entire cifar10 test set just to get the labels')
cifar10 = CIFAR10(which_set = 'test')
return np.asarray(cifar10.y)
if cifar100:
print('loading entire cifar100 test set just to get the fine labels')
cifar100 = CIFAR100(which_set = 'test')
return np.asarray(cifar100.y_fine)
assert False
def main(model_path,
test_path,
dataset,
**kwargs):
model = serial.load(model_path)
cifar100 = dataset == 'cifar100'
cifar10 = dataset == 'cifar10'
stl10 = dataset == 'stl10'
assert cifar10 + cifar100 + stl10 == 1
y = get_test_labels(cifar10, cifar100, stl10)
X = get_features(test_path, False, False)
if stl10:
num_examples = 8000
if cifar10 or cifar100:
num_examples = 10000
if not X.shape[0] == num_examples:
raise AssertionError('Expected %d examples but got %d' % (num_examples, X.shape[0]))
assert y.shape[0] == num_examples
test(model,X,y)
if __name__ == '__main__':
"""
Useful for quick tests.
    Usage: python evaluate.py
"""
parser = OptionParser()
parser.add_option("-m", "--model",
action="store", type="string", dest="model_path")
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-o", action="store", dest="output", default = None, help="path to write the report to")
parser.add_option('--dataset', type='string', dest = 'dataset', action='store', default = None)
#(options, args) = parser.parse_args()
#assert options.output
main(model_path='final_model.pkl',
test_path='test_features.npy',
dataset = 'cifar100',
)
| bsd-3-clause |
fboers/jumeg | examples/do_MLICA.py | 1 | 5891 | """
Compute ICA object based on filtered and downsampled data.
Identify ECG and EOG artifacts using MLICA and compare
results to correlation & ctps analysis.
Apply ICA object to filtered and unfiltered data.
Ahmad Hasasneh, Nikolas Kampel, Praveen Sripad, N. Jon Shah, and Juergen Dammers
"Deep Learning Approach for Automatic Classification of Ocular and Cardiac
Artifacts in MEG Data"
Journal of Engineering, vol. 2018, Article ID 1350692,10 pages, 2018.
https://doi.org/10.1155/2018/1350692
"""
import os.path as op
import matplotlib.pylab as plt
plt.ion()
import numpy as np
import mne
from jumeg.decompose.ica_replace_mean_std import ICA, ica_update_mean_std
from keras.models import load_model
from jumeg.jumeg_noise_reducer import noise_reducer
from jumeg.jumeg_preprocessing import get_ics_cardiac, get_ics_ocular
from jumeg.jumeg_plot import plot_performance_artifact_rejection
from jumeg.jumeg_utils import get_jumeg_path
# config
MLICA_threshold = 0.8
n_components = 60
njobs = 4 # for downsampling
tmin = 0
tmax = tmin + 15000
flow_ecg, fhigh_ecg = 8, 20
flow_eog, fhigh_eog = 1, 20
ecg_thresh, eog_thresh = 0.3, 0.3
ecg_ch = 'ECG 001'
eog1_ch = 'EOG 001'
eog2_ch = 'EOG 002'
reject = {'mag': 5e-12}
refnotch = [50., 100., 150., 200., 250., 300., 350., 400.]
data_path = op.join(get_jumeg_path(), 'data')
print(data_path)
# example filname
raw_fname = "/Volumes/megraid21/sripad/cau_fif_data/jumeg_test_data/" \
"109925_CAU01A_100715_0842_2_c,rfDC-raw.fif"
# load the model for artifact rejection
# the details of the model are provided in x_validation_shuffle_v4_split_23.txt
model_name = op.join(data_path, "dcnn_model.hdf5")
model = load_model(model_name)
# noise reducer
raw_nr = noise_reducer(raw_fname, reflp=5., return_raw=True)
raw_nr = noise_reducer(raw_fname, raw=raw_nr, refhp=0.1, noiseref=['RFG ...'],
return_raw=True)
# 50HZ and 60HZ notch filter to remove noise
raw = noise_reducer(raw_fname, raw=raw_nr, refnotch=refnotch, return_raw=True)
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
raw_filtered = raw.copy().filter(0., 45., picks=picks, filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto',
n_jobs=njobs, method='fir', phase='zero',
fir_window='hamming')
# downsample the data to 250 Hz, necessary for the model
raw_ds = raw_filtered.copy().resample(250, npad='auto', window='boxcar', stim_picks=None,
n_jobs=njobs, events=None)
raw_ds_chop = raw_ds.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000) # downsampled raw
raw_filtered_chop = raw_filtered.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000)
raw_chop = raw.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000)
ica = ICA(method='fastica', n_components=n_components, random_state=42,
max_pca_components=None, max_iter=5000, verbose=None)
# do the ICA decomposition on downsampled raw
ica.fit(raw_ds_chop, picks=picks, reject=reject, verbose=None)
sources = ica.get_sources(raw_ds_chop)._data
# extract temporal and spatial components
mm = np.float32(np.dot(ica.mixing_matrix_[:, :].T,
ica.pca_components_[:ica.n_components_]))
# use [:, :15000] to make sure it's 15000 data points
chop = sources[:, :15000]
chop_reshaped = np.reshape(chop, (len(chop), len(chop[0]), 1))
model_scores = model.predict([mm, chop_reshaped], verbose=1)
bads_MLICA = []
# print model_scores
for idx in range(0, len(model_scores)):
if model_scores[idx][0] > MLICA_threshold:
bads_MLICA.append(idx)
# visualisation
# ica.exclude = bads_MLICA
# ica.plot_sources(raw_ds_chop, block=True)
# compare MLICA to results from correlation and ctps analysis
ica.exclude = []
print('Identifying components..')
# get ECG/EOG related components using JuMEG
ic_ecg = get_ics_cardiac(raw_filtered_chop, ica, flow=flow_ecg, fhigh=fhigh_ecg,
thresh=ecg_thresh, tmin=-0.5, tmax=0.5,
name_ecg=ecg_ch, use_CTPS=True)[0] # returns both ICs and scores (take only ICs)
ic_eog = get_ics_ocular(raw_filtered_chop, ica, flow=flow_eog, fhigh=fhigh_eog,
thresh=eog_thresh, name_eog_hor=eog1_ch,
name_eog_ver=eog2_ch, score_func='pearsonr')
bads_corr_ctps = list(ic_ecg) + list(ic_eog)
bads_corr_ctps = list(set(bads_corr_ctps)) # remove potential duplicates
bads_corr_ctps.sort()
# visualisation
# ica.exclude = bads_corr_ctps
# ica.plot_sources(raw_chop, block=True)
print('Bad components from MLICA:', bads_MLICA)
print('Bad components from correlation & ctps:', bads_corr_ctps)
# apply MLICA result to filtered and unfiltered data
# exclude bad components identified by MLICA
ica.exclude = bads_MLICA
fnout_fig = '109925_CAU01A_100715_0842_2_c,rfDC,0-45hz,ar-perf'
ica_filtered_chop = ica_update_mean_std(raw_filtered_chop, ica, picks=picks, reject=reject)
raw_filtered_chop_clean = ica_filtered_chop.apply(raw_filtered_chop, exclude=ica.exclude,
n_pca_components=None)
ica_unfiltered_chop = ica_update_mean_std(raw_chop, ica, picks=picks, reject=reject)
raw_unfiltered_chop_clean = ica_unfiltered_chop.apply(raw_chop, exclude=ica.exclude, n_pca_components=None)
# create copy of original data since apply_ica_replace_mean_std changes the input data in place (raw and ica)
raw_copy = raw.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000)
plot_performance_artifact_rejection(raw_copy, ica_unfiltered_chop, fnout_fig,
meg_clean=raw_unfiltered_chop_clean,
show=False, verbose=False,
name_ecg=ecg_ch,
name_eog=eog2_ch)
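# Optional follow-up (illustrative, not part of the original pipeline): the cleaned chops
# could be written to disk with MNE's standard Raw.save API, e.g.
#   raw_unfiltered_chop_clean.save('cleaned_unfiltered_chop-raw.fif', overwrite=True)  # hypothetical filename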
| bsd-3-clause |