repo_name (stringlengths 6-100) | path (stringlengths 4-191) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 935-727k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
benbo/FastDocumentClusters | fast_document_clusters.py | 1 | 8883 | # -*- coding: utf-8 -*-
import time
import os,sys
import itertools
import math
import argparse
import numpy as np
from multiprocessing import Pool
from hashlib import sha1
import random, struct
from random import sample,choice
from sklearn import metrics
# We truncate sha1 for now; this should probably be replaced with a proper hash function.
M_PRIME = (1 << 89) - 1 # (x << n) is x shifted left by n bits
MAX_HASH = (1 << 64) - 1
NUM_PERM=100
random.seed(427)
A,B = np.array([(random.randint(1, M_PRIME),random.randint(0, M_PRIME)) for _ in range(NUM_PERM)]).T
#############
# functions #
#############
def set_permutations(numperm):
    # rebind the module-level permutation parameters so later hashing uses them
    global NUM_PERM, A, B
    NUM_PERM = numperm
    A, B = np.array([(random.randint(1, M_PRIME), random.randint(0, M_PRIME)) for _ in range(NUM_PERM)]).T
def get_permuted_hashes(token):
# get a hash value
#abusing sha1 and truncating to 12 digit number
hv=int(sha1(token).hexdigest(),16)% (10 ** 12)
#do Carter and Wegman like hashing.
return np.bitwise_and((A * hv + B) % M_PRIME,MAX_HASH)
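# For intuition: each row of (A, B) defines one Carter-Wegman style hash
# h_i(x) = ((a_i * x + b_i) mod M_PRIME) & MAX_HASH, so the single sha1-derived
# value hv above is mapped to NUM_PERM permuted hash values in one vectorized step.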
def get_clusters(fn):
with open(fn,'r') as f:
next(f)#skip header
for line in f:
a=line.split(',')
yield a[0],a[2]
def get_lsh(sig,nbands):
for i,band in enumerate(np.array_split(sig,nbands)):
yield sha1("ab" + str(band) + "ba"+str(i)).digest()
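# For intuition: np.array_split(sig, nbands) cuts a signature into nbands
# contiguous bands, and each band is hashed into a bucket key, so two documents
# become LSH candidates whenever at least one of their bands matches exactly.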
def get_bandwidth(n, tr):
"""
Threshold tr = (1/b) ** (1/r) where
b #bands
r #rows per band
n = b * r #elements in signature
"""
best = n
minerr = float("inf")
for r in range(1, n + 1):
try:
b = 1. / (tr ** r)
except:
return best
err = abs(n - b * r)
if err < minerr:
best = r
minerr = err
return best
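# Example usage (illustrative values, not from the original script):
#   r = get_bandwidth(100, 0.7)           # rows per LSH band for NUM_PERM=100
#   b = int(math.ceil(100.0 / float(r)))  # number of bands, as in compute_clusters below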
def connected(seed,lshdict,doc2lsh,t):
'''
Computes clusters based on the lsh bucket candidates.
We do not actually check the full connected component.
We only check for similar docs amongst the lsh candidates for each cluster member.
'''
cluster=set([seed])
#get candidates and flatten list
base=set([seed])
while len(base)>0:
s=base.pop()
#get candidates and flatten list
candidates=set(itertools.chain.from_iterable([lshdict[sig] for sig in doc2lsh[s]]))
m1=hashcorp[s]
for cand in candidates:
if cand in cluster:continue#don't check if we've already added this
m2=hashcorp[cand]
if jaccard(m1,m2) >=t:
cluster.add(cand)
base.add(cand)
#all candidates have been checked
return cluster
def jaccard(h1,h2):
'''
Compute jaccard similarity between two minhash signatures.
Make sure to only compute jaccard similarity for hashes created with same hash functions (i.e. same seed for random permutation)
'''
return float(np.count_nonzero(h1 == h2)) / float(h2.size)
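# Example usage (illustrative, assumes at least two documents were minhashed):
#   sig_a, sig_b = hashcorp[0], hashcorp[1]
#   jaccard(sig_a, sig_b)  # fraction of matching signature positions, an estimate in [0, 1]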
def near_duplicates(seed,lshdict,doc2lsh,t):
cluster=set([seed])
#get candidates and flatten list
candidates=set(itertools.chain.from_iterable([lshdict[sig] for sig in doc2lsh[seed]]))
m1=hashcorp[seed]
for cand in candidates:
if cand in cluster:continue#don't check if we've already added this
m2=hashcorp[cand]
if jaccard(m2,m1) >=t:
cluster.add(cand)
#all candidates have been checked
return cluster
def compute_clusters(obj):
thr=obj[0]
bandwidth=get_bandwidth(NUM_PERM, thr)#r
bands=int(math.ceil(float(NUM_PERM)/float(bandwidth)))#b
print("starting calculations for threshold "+str(thr)+"\nnumber of lsh bands: "+str(bands))
sys.stdout.flush()
start_time = time.time()
doc_to_lsh={}
lsh_dict={}
for key,m in hashcorp.items():
#compute lsh
signatures = [sig for sig in get_lsh(m,bands)]
#store signatures for this document
doc_to_lsh[key]=signatures
#store lsh signature to key
for sig in signatures:
if sig in lsh_dict:
lsh_dict[sig].append(key)
else:
lsh_dict[sig]=[key]
print(("Calculating lsh signatures for threshold "+str(thr)+" took\n ---%s seconds ---\n" % (time.time() - start_time)))
sys.stdout.flush()
#compute connected components
start_time = time.time()
doc2cluster={}
count=0
for doc in hashcorp:
if doc not in doc2cluster:
cl=connected(doc,lsh_dict,doc_to_lsh,thr)
doc2cluster.update({i:count for i in cl })
count+=1
print(("Computing connected components for threshold: "+str(thr)+" took\n--- %s seconds ---\n" % (time.time() - start_time)))
print("write results to file")
start_time = time.time()
f=open(outdir+'/doc2cluster_'+str(thr)+'_'+suffix+'.csv','w')
f.write('line,cluster\n')
for key, value in doc2cluster.items():
f.write(str(key)+','+str(value)+'\n')
f.close()
print(("Writing results to files for threshold "+str(thr)+" took:\n--- %s seconds ---\n" % (time.time() - start_time)))
#Set up command line arguments
parser = argparse.ArgumentParser(description='Calculate connected components of documents with given threshold(s)')
parser.add_argument("-t", dest="threshold",type=float,help="threshold for ER", metavar="T")
parser.add_argument("-lt", dest="lt",type=float,help="lower threshold for ER", metavar="TL")
parser.add_argument("-ut", dest="ut",type=float,help="upper threshold for ER", metavar="TU")
parser.add_argument("-out", dest="out",help="output directory", metavar="OUT")
parser.add_argument("-steps", dest="steps",type=float,help="number of steps between lower and upper threshold", metavar="TSTEP")
parser.add_argument("-sigl", dest="num_permutations",type=int,help="minhash signature length", metavar="SIG")
parser.add_argument("-suff", dest="suffix",help="output file suffix", metavar="S")
parser.add_argument("-infile", dest="infile",help="input file",required=True, metavar="IF")
parser.add_argument('-header', dest='header', action='store_true')
parser.add_argument('-near_dups', dest='near_dups',help="Do near duplicate detection. If this is not set, connected components will be computed", action='store_true')
parser.add_argument("-p", dest="nump", required=False,type=int,help="number of processes for multithreading", metavar="NUMP")
parser.set_defaults(match=False)
parser.set_defaults(header=True)
parser.set_defaults(near_dups=True)
parser.set_defaults(threshold=None)
parser.set_defaults(num_permutations=100)
parser.set_defaults(lt=0.0)
parser.set_defaults(ut=1.0)
parser.set_defaults(steps=2)
parser.set_defaults(nump=1)
parser.set_defaults(suffix='')
parser.set_defaults(out='out')
if __name__ == "__main__":
#fetch command line arguments
args = parser.parse_args()
num_processes=args.nump
suffix=args.suffix
if NUM_PERM!=args.num_permutations:
set_permutations(args.num_permutations)
#create output directory if it does not exist
outdir=args.out
if not os.path.exists(outdir):
os.makedirs(outdir)
thresholds=[]
lt=args.lt
near_dups=args.near_dups
ut=args.ut
steps=args.steps
if args.threshold is not None:
thresholds=[args.threshold]
else:
if None in [lt,ut,steps]:
print("need lower threshold, upper threshold, and number of steps")
exit()
else:
thresholds=np.linspace(lt, ut, num=steps)
#load text. Flat file for now
print('load text')
start_time = time.time()
with open(args.infile,'r') as f:
if args.header:
next(f)
#TODO test robustness
#mycorpus=[(i,set(line.encode('utf8', 'ignore').lower().split())) for i,line in enumerate(f)]
mycorpus=[(i,set(line.lower().split())) for i,line in enumerate(f)]
print(("--- %s seconds ---" % (time.time() - start_time)))
print('Calculate minhash signatures')
start_time = time.time()
#prepare dictionary of hashes
hashcorp=dict.fromkeys([tup[0] for tup in mycorpus])
#compute hashes
for key,doc in mycorpus:
#compute minhash signature
hashvalues=np.empty(NUM_PERM)
hashvalues.fill(MAX_HASH)
for token in doc:
#np.minimum(get_permuted_hashes(token.encode('utf-8','ignore')), hashvalues)
hashvalues = np.minimum(get_permuted_hashes(token), hashvalues)
hashcorp[key]=hashvalues
print(("--- %s seconds ---" % (time.time() - start_time)))
if num_processes> 1:
if len(thresholds)<num_processes:
num_processes=len(thresholds)
p=Pool(num_processes)
assignment=[ (x,) for x in thresholds]
p.map(compute_clusters,assignment)
else:
for x in thresholds:
compute_clusters((x,))
| bsd-3-clause |
belavenir/rpg_svo | svo_analysis/src/svo_analysis/tum_benchmark_tools/evaluate_rpe.py | 20 | 15381 | #!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This script computes the relative pose error from the ground truth trajectory
and the estimated trajectory.
"""
import argparse
import random
import numpy
import sys
_EPS = numpy.finfo(float).eps * 4.0
def transform44(l):
"""
Generate a 4x4 homogeneous transformation matrix from a 3D point and unit quaternion.
Input:
l -- tuple consisting of (stamp,tx,ty,tz,qx,qy,qz,qw) where
(tx,ty,tz) is the 3D position and (qx,qy,qz,qw) is the unit quaternion.
Output:
matrix -- 4x4 homogeneous transformation matrix
"""
t = l[1:4]
q = numpy.array(l[4:8], dtype=numpy.float64, copy=True)
nq = numpy.dot(q, q)
if nq < _EPS:
return numpy.array((
        ( 1.0, 0.0, 0.0, t[0]),
        ( 0.0, 1.0, 0.0, t[1]),
        ( 0.0, 0.0, 1.0, t[2]),
        ( 0.0, 0.0, 0.0, 1.0)
        ), dtype=numpy.float64)
q *= numpy.sqrt(2.0 / nq)
q = numpy.outer(q, q)
return numpy.array((
(1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], t[0]),
( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], t[1]),
( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], t[2]),
( 0.0, 0.0, 0.0, 1.0)
), dtype=numpy.float64)
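# Worked example (illustrative values): a pose with the identity quaternion is a
# pure translation, e.g. transform44((0.0, 1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 1.0))
# returns a 4x4 matrix with an identity rotation block and translation (1, 2, 3).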
def read_trajectory(filename, matrix=True):
"""
Read a trajectory from a text file.
Input:
filename -- file to be read
matrix -- convert poses to 4x4 matrices
Output:
dictionary of stamped 3D poses
"""
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
list = [[float(v.strip()) for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
list_ok = []
for i,l in enumerate(list):
if l[4:8]==[0,0,0,0]:
continue
isnan = False
for v in l:
if numpy.isnan(v):
isnan = True
break
if isnan:
sys.stderr.write("Warning: line %d of file '%s' has NaNs, skipping line\n"%(i,filename))
continue
list_ok.append(l)
if matrix :
traj = dict([(l[0],transform44(l[0:])) for l in list_ok])
else:
traj = dict([(l[0],l[1:8]) for l in list_ok])
return traj
def find_closest_index(L,t):
"""
Find the index of the closest value in a list.
Input:
L -- the list
t -- value to be found
Output:
index of the closest element
"""
beginning = 0
difference = abs(L[0] - t)
best = 0
end = len(L)
while beginning < end:
middle = int((end+beginning)/2)
if abs(L[middle] - t) < difference:
difference = abs(L[middle] - t)
best = middle
if t == L[middle]:
return middle
elif L[middle] > t:
end = middle
else:
beginning = middle + 1
return best
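# Worked example (illustrative values): for the sorted stamps [0.0, 1.0, 2.0, 3.0],
# find_closest_index([0.0, 1.0, 2.0, 3.0], 1.4) returns 1, since 1.0 is closest to 1.4.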
def ominus(a,b):
"""
Compute the relative 3D transformation between a and b.
Input:
a -- first pose (homogeneous 4x4 matrix)
b -- second pose (homogeneous 4x4 matrix)
Output:
Relative 3D transformation from a to b.
"""
return numpy.dot(numpy.linalg.inv(a),b)
def scale(a,scalar):
"""
Scale the translational components of a 4x4 homogeneous matrix by a scale factor.
"""
return numpy.array(
[[a[0,0], a[0,1], a[0,2], a[0,3]*scalar],
[a[1,0], a[1,1], a[1,2], a[1,3]*scalar],
[a[2,0], a[2,1], a[2,2], a[2,3]*scalar],
[a[3,0], a[3,1], a[3,2], a[3,3]]]
)
def compute_distance(transform):
"""
Compute the distance of the translational component of a 4x4 homogeneous matrix.
"""
return numpy.linalg.norm(transform[0:3,3])
def compute_angle(transform):
"""
Compute the rotation angle from a 4x4 homogeneous matrix.
"""
# an invitation to 3-d vision, p 27
return numpy.arccos( min(1,max(-1, (numpy.trace(transform[0:3,0:3]) - 1)/2) ))
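# For intuition: a rotation by an angle theta (about any axis) satisfies
# trace(R) = 1 + 2*cos(theta), so the expression above recovers theta; e.g. a
# 90-degree rotation about z has trace(R[0:3,0:3]) = 1 and yields pi/2.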
def distances_along_trajectory(traj):
"""
Compute the translational distances along a trajectory.
"""
keys = traj.keys()
keys.sort()
motion = [ominus(traj[keys[i+1]],traj[keys[i]]) for i in range(len(keys)-1)]
distances = [0]
sum = 0
for t in motion:
sum += compute_distance(t)
distances.append(sum)
return distances
def rotations_along_trajectory(traj,scale):
"""
Compute the angular rotations along a trajectory.
"""
keys = traj.keys()
keys.sort()
motion = [ominus(traj[keys[i+1]],traj[keys[i]]) for i in range(len(keys)-1)]
distances = [0]
sum = 0
for t in motion:
sum += compute_angle(t)*scale
distances.append(sum)
return distances
def evaluate_trajectory(traj_gt,traj_est,param_max_pairs=10000,param_fixed_delta=False,param_delta=1.00,param_delta_unit="s",param_offset=0.00,param_scale=1.00):
"""
Compute the relative pose error between two trajectories.
Input:
traj_gt -- the first trajectory (ground truth)
traj_est -- the second trajectory (estimated trajectory)
param_max_pairs -- number of relative poses to be evaluated
param_fixed_delta -- false: evaluate over all possible pairs
true: only evaluate over pairs with a given distance (delta)
param_delta -- distance between the evaluated pairs
param_delta_unit -- unit for comparison:
"s": seconds
"m": meters
"rad": radians
"deg": degrees
"f": frames
param_offset -- time offset between two trajectories (to model the delay)
param_scale -- scale to be applied to the second trajectory
Output:
list of compared poses and the resulting translation and rotation error
"""
stamps_gt = list(traj_gt.keys())
stamps_est = list(traj_est.keys())
stamps_gt.sort()
stamps_est.sort()
stamps_est_return = []
for t_est in stamps_est:
t_gt = stamps_gt[find_closest_index(stamps_gt,t_est + param_offset)]
t_est_return = stamps_est[find_closest_index(stamps_est,t_gt - param_offset)]
t_gt_return = stamps_gt[find_closest_index(stamps_gt,t_est_return + param_offset)]
if not t_est_return in stamps_est_return:
stamps_est_return.append(t_est_return)
if(len(stamps_est_return)<2):
raise Exception("Number of overlap in the timestamps is too small. Did you run the evaluation on the right files?")
if param_delta_unit=="s":
index_est = list(traj_est.keys())
index_est.sort()
elif param_delta_unit=="m":
index_est = distances_along_trajectory(traj_est)
elif param_delta_unit=="rad":
index_est = rotations_along_trajectory(traj_est,1)
elif param_delta_unit=="deg":
index_est = rotations_along_trajectory(traj_est,180/numpy.pi)
elif param_delta_unit=="f":
index_est = range(len(traj_est))
else:
raise Exception("Unknown unit for delta: '%s'"%param_delta_unit)
if not param_fixed_delta:
if(param_max_pairs==0 or len(traj_est)<numpy.sqrt(param_max_pairs)):
pairs = [(i,j) for i in range(len(traj_est)) for j in range(len(traj_est))]
else:
pairs = [(random.randint(0,len(traj_est)-1),random.randint(0,len(traj_est)-1)) for i in range(param_max_pairs)]
else:
pairs = []
for i in range(len(traj_est)):
j = find_closest_index(index_est,index_est[i] + param_delta)
if j!=len(traj_est)-1:
pairs.append((i,j))
if(param_max_pairs!=0 and len(pairs)>param_max_pairs):
pairs = random.sample(pairs,param_max_pairs)
gt_interval = numpy.median([s-t for s,t in zip(stamps_gt[1:],stamps_gt[:-1])])
gt_max_time_difference = 2*gt_interval
result = []
for i,j in pairs:
stamp_est_0 = stamps_est[i]
stamp_est_1 = stamps_est[j]
stamp_gt_0 = stamps_gt[ find_closest_index(stamps_gt,stamp_est_0 + param_offset) ]
stamp_gt_1 = stamps_gt[ find_closest_index(stamps_gt,stamp_est_1 + param_offset) ]
if(abs(stamp_gt_0 - (stamp_est_0 + param_offset)) > gt_max_time_difference or
abs(stamp_gt_1 - (stamp_est_1 + param_offset)) > gt_max_time_difference):
continue
error44 = ominus( scale(
ominus( traj_est[stamp_est_1], traj_est[stamp_est_0] ),param_scale),
ominus( traj_gt[stamp_gt_1], traj_gt[stamp_gt_0] ) )
trans = compute_distance(error44)
rot = compute_angle(error44)
result.append([stamp_est_0,stamp_est_1,stamp_gt_0,stamp_gt_1,trans,rot])
if len(result)<2:
raise Exception("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory!")
return result
def percentile(seq,q):
"""
Return the q-percentile of a list
"""
seq_sorted = list(seq)
seq_sorted.sort()
return seq_sorted[int((len(seq_sorted)-1)*q)]
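# Worked example (illustrative values): percentile([4.0, 1.0, 3.0, 2.0], 0.5)
# sorts to [1.0, 2.0, 3.0, 4.0] and returns 2.0, the element at index int(3 * 0.5) = 1.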
if __name__ == '__main__':
random.seed(0)
parser = argparse.ArgumentParser(description='''
This script computes the relative pose error from the ground truth trajectory and the estimated trajectory.
''')
parser.add_argument('groundtruth_file', help='ground-truth trajectory file (format: "timestamp tx ty tz qx qy qz qw")')
parser.add_argument('estimated_file', help='estimated trajectory file (format: "timestamp tx ty tz qx qy qz qw")')
parser.add_argument('--max_pairs', help='maximum number of pose comparisons (default: 10000, set to zero to disable downsampling)', default=10000)
parser.add_argument('--fixed_delta', help='only consider pose pairs that have a distance of delta delta_unit (e.g., for evaluating the drift per second/meter/radian)', action='store_true')
parser.add_argument('--delta', help='delta for evaluation (default: 1.0)',default=1.0)
parser.add_argument('--delta_unit', help='unit of delta (options: \'s\' for seconds, \'m\' for meters, \'rad\' for radians, \'f\' for frames; default: \'s\')',default='s')
parser.add_argument('--offset', help='time offset between ground-truth and estimated trajectory (default: 0.0)',default=0.0)
parser.add_argument('--scale', help='scaling factor for the estimated trajectory (default: 1.0)',default=1.0)
parser.add_argument('--save', help='text file to which the evaluation will be saved (format: stamp_est0 stamp_est1 stamp_gt0 stamp_gt1 trans_error rot_error)')
parser.add_argument('--plot', help='plot the result to a file (requires --fixed_delta, output format: png)')
parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the mean translational error measured in meters will be printed)', action='store_true')
args = parser.parse_args()
if args.plot and not args.fixed_delta:
sys.exit("The '--plot' option can only be used in combination with '--fixed_delta'")
traj_gt = read_trajectory(args.groundtruth_file)
traj_est = read_trajectory(args.estimated_file)
result = evaluate_trajectory(traj_gt,
traj_est,
int(args.max_pairs),
args.fixed_delta,
float(args.delta),
args.delta_unit,
float(args.offset),
float(args.scale))
stamps = numpy.array(result)[:,0]
trans_error = numpy.array(result)[:,4]
rot_error = numpy.array(result)[:,5]
if args.save:
f = open(args.save,"w")
f.write("\n".join([" ".join(["%f"%v for v in line]) for line in result]))
f.close()
if args.verbose:
print "compared_pose_pairs %d pairs"%(len(trans_error))
print "translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
print "translational_error.mean %f m"%numpy.mean(trans_error)
print "translational_error.median %f m"%numpy.median(trans_error)
print "translational_error.std %f m"%numpy.std(trans_error)
print "translational_error.min %f m"%numpy.min(trans_error)
print "translational_error.max %f m"%numpy.max(trans_error)
print "rotational_error.rmse %f deg"%(numpy.sqrt(numpy.dot(rot_error,rot_error) / len(rot_error)) * 180.0 / numpy.pi)
print "rotational_error.mean %f deg"%(numpy.mean(rot_error) * 180.0 / numpy.pi)
print "rotational_error.median %f deg"%numpy.median(rot_error)
print "rotational_error.std %f deg"%(numpy.std(rot_error) * 180.0 / numpy.pi)
print "rotational_error.min %f deg"%(numpy.min(rot_error) * 180.0 / numpy.pi)
print "rotational_error.max %f deg"%(numpy.max(rot_error) * 180.0 / numpy.pi)
else:
print numpy.mean(trans_error)
if args.plot:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(stamps - stamps[0],trans_error,'-',color="blue")
#ax.plot([t for t,e in err_rot],[e for t,e in err_rot],'-',color="red")
ax.set_xlabel('time [s]')
ax.set_ylabel('translational error [m]')
plt.savefig(args.plot,dpi=300)
| gpl-3.0 |
aleksandr-bakanov/astropy | examples/coordinates/plot_galactocentric-frame.py | 2 | 7979 | # -*- coding: utf-8 -*-
"""
========================================================================
Transforming positions and velocities to and from a Galactocentric frame
========================================================================
This document shows a few examples of how to use and customize the
`~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky
positions, distance, proper motions, and radial velocities to a Galactocentric,
Cartesian frame, and the same in reverse.
The main configurable parameters of the `~astropy.coordinates.Galactocentric`
frame control the position and velocity of the solar system barycenter within
the Galaxy. These are specified by setting the ICRS coordinates of the
Galactic center, the distance to the Galactic center (the sun-galactic center
line is always assumed to be the x-axis of the Galactocentric frame), and the
Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first
demonstrate how to customize these values, then show how to set the solar motion
instead by inputting the proper motion of Sgr A*.
Note that, for brevity, we may refer to the solar system barycenter as just "the
sun" in the examples below.
*By: Adrian Price-Whelan*
*License: BSD*
"""
##############################################################################
# Make `print` work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the necessary astropy subpackages
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# Let's first define a barycentric coordinate and velocity in the ICRS frame.
# We'll use the data for the star HD 39881 from the `Simbad
# <http://simbad.harvard.edu/simbad/>`_ database:
c1 = coord.ICRS(ra=89.014303*u.degree, dec=13.924912*u.degree,
distance=(37.59*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=372.72*u.mas/u.yr,
pm_dec=-483.69*u.mas/u.yr,
radial_velocity=0.37*u.km/u.s)
##############################################################################
# This is a high proper-motion star; suppose we'd like to transform its position
# and velocity to a Galactocentric frame to see if it has a large 3D velocity
# as well. To use the Astropy default solar position and motion parameters, we
# can simply do:
gc1 = c1.transform_to(coord.Galactocentric)
##############################################################################
# From here, we can access the components of the resulting
# `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian
# velocity components:
print(gc1.v_x, gc1.v_y, gc1.v_z)
##############################################################################
# The default parameters for the `~astropy.coordinates.Galactocentric` frame
# are detailed in the linked documentation, but we can modify the most commonly
# changed values using the keywords ``galcen_distance``, ``galcen_v_sun``, and
# ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector
# of the sun, and the height of the sun above the Galactic midplane,
# respectively. The velocity of the sun can be specified as an
# `~astropy.units.Quantity` object with velocity units and is interpreted as a
# Cartesian velocity, as in the example below. Note that, as with the positions,
# the Galactocentric frame is a right-handed system (i.e., the Sun is at negative
# x values) so ``v_x`` is opposite of the Galactocentric radial velocity:
v_sun = [11.1, 244, 7.25] * (u.km / u.s) # [vx, vy, vz]
gc_frame = coord.Galactocentric(
galcen_distance=8*u.kpc,
galcen_v_sun=v_sun,
z_sun=0*u.pc)
##############################################################################
# We can then transform to this frame instead, with our custom parameters:
gc2 = c1.transform_to(gc_frame)
print(gc2.v_x, gc2.v_y, gc2.v_z)
##############################################################################
# It's sometimes useful to specify the solar motion using the `proper motion
# of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian
# velocity components. With an assumed distance, we can convert proper motion
# components to Cartesian velocity components using `astropy.units`:
galcen_distance = 8*u.kpc
pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr # from Reid & Brunthaler 2004
vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles())
##############################################################################
# We still have to assume a line-of-sight velocity for the Galactic center,
# which we will again take to be 11 km/s:
vx = 11.1 * u.km/u.s
v_sun2 = u.Quantity([vx, vy, vz]) # List of Quantity -> a single Quantity
gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance,
galcen_v_sun=v_sun2,
z_sun=0*u.pc)
gc3 = c1.transform_to(gc_frame2)
print(gc3.v_x, gc3.v_y, gc3.v_z)
##############################################################################
# The transformations also work in the opposite direction. This can be useful
# for transforming simulated or theoretical data to observable quantities. As
# an example, we'll generate 4 theoretical circular orbits at different
# Galactocentric radii with the same circular velocity, and transform them to
# Heliocentric coordinates:
ring_distances = np.arange(10, 25+1, 5) * u.kpc
circ_velocity = 220 * u.km/u.s
phi_grid = np.linspace(90, 270, 512) * u.degree # grid of azimuths
ring_rep = coord.CylindricalRepresentation(
rho=ring_distances[:,np.newaxis],
phi=phi_grid[np.newaxis],
z=np.zeros_like(ring_distances)[:,np.newaxis])
angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr,
u.dimensionless_angles())
ring_dif = coord.CylindricalDifferential(
d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s,
d_phi=angular_velocity[:,np.newaxis],
d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s
)
ring_rep = ring_rep.with_differentials(ring_dif)
gc_rings = coord.Galactocentric(ring_rep)
##############################################################################
# First, let's visualize the geometry in Galactocentric coordinates. Here are
# the positions and velocities of the rings; note that in the velocity plot,
# the velocities of the 4 rings are identical and thus overlaid under the same
# curve:
fig,axes = plt.subplots(1, 2, figsize=(12,6))
# Positions
axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3)
axes[0].text(-8., 0, r'$\odot$', fontsize=20)
axes[0].set_xlim(-30, 30)
axes[0].set_ylim(-30, 30)
axes[0].set_xlabel('$x$ [kpc]')
axes[0].set_ylabel('$y$ [kpc]')
# Velocities
axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3)
axes[1].set_xlim(-250, 250)
axes[1].set_ylim(-250, 250)
axes[1].set_xlabel('$v_x$ [{0}]'.format((u.km/u.s).to_string("latex_inline")))
axes[1].set_ylabel('$v_y$ [{0}]'.format((u.km/u.s).to_string("latex_inline")))
fig.tight_layout()
##############################################################################
# Now we can transform to Galactic coordinates and visualize the rings in
# observable coordinates:
gal_rings = gc_rings.transform_to(coord.Galactic)
fig,ax = plt.subplots(1, 1, figsize=(8,6))
for i in range(len(ring_distances)):
ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value,
label=str(ring_distances[i]), marker='None', linewidth=3)
ax.set_xlim(360, 0)
ax.set_xlabel('$l$ [deg]')
ax.set_ylabel(r'$\mu_l \, \cos b$ [{0}]'.format((u.mas/u.yr).to_string('latex_inline')))
ax.legend()
| bsd-3-clause |
annahs/atmos_research | util_display_hysplit_traj.py | 1 | 1661 | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
import os
import sys
import matplotlib.colors
import colorsys
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import pickle
import mmap
tdump_file = open('C:/Users/Sarah Hanna/Documents/Data/Alert Data/Alert-March 2016/tdump201603', 'r')
print tdump_file
data_start = False
endpoints = []
for line in tdump_file:
    newline = line.split()
    if data_start == True:
        lat = float(newline[9])
        lon = float(newline[10])
        pressure = float(newline[11])  # in hPa
        year = int(newline[2])
        month = int(newline[3])
        day = int(newline[4])
        hour = int(newline[5])
        endpoint = [lat, lon]
        endpoints.append(endpoint)
    if newline[1] == 'PRESSURE':
        data_start = True
tdump_file.close()
# plotting
###set up the basemap instance
lat_pt = 80.
lon_pt = -65.
plt_lat_min = 0
plt_lat_max = 90#44.2
plt_lon_min = -150#-125.25
plt_lon_max = -1
m = Basemap(width=9000000,height=7000000,
rsphere=(6378137.00,6356752.3142),
resolution='l',area_thresh=1000.,projection='lcc',
lat_1=45.,lat_2=55,lat_0=lat_pt,lon_0=lon_pt)
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
m.drawmapboundary(fill_color='white')
#rough shapes
m.drawcoastlines()
m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
####other data
np_endpoints = np.array(endpoints)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
x,y = m(lons,lats)
bt = m.plot(x,y,linewidth = 2, color ='r')
plt.show()
| mit |
astocko/statsmodels | statsmodels/formula/tests/test_formula.py | 29 | 4647 | from statsmodels.compat.python import iteritems, StringIO
import warnings
from statsmodels.formula.api import ols
from statsmodels.formula.formulatools import make_hypotheses_matrices
from statsmodels.tools import add_constant
from statsmodels.datasets.longley import load, load_pandas
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
from numpy.testing.utils import WarningManager
longley_formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
class CheckFormulaOLS(object):
@classmethod
def setupClass(cls):
cls.data = load()
def test_endog_names(self):
assert self.model.endog_names == 'TOTEMP'
def test_exog_names(self):
assert self.model.exog_names == ['Intercept', 'GNPDEFL', 'GNP',
'UNEMP', 'ARMED', 'POP', 'YEAR']
def test_design(self):
npt.assert_equal(self.model.exog,
add_constant(self.data.exog, prepend=True))
def test_endog(self):
npt.assert_equal(self.model.endog, self.data.endog)
def test_summary(self):
# smoke test
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.filterwarnings("ignore",
"kurtosistest only valid for n>=20")
self.model.fit().summary()
finally:
warn_ctx.__exit__()
class TestFormulaPandas(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load_pandas().data
cls.model = ols(longley_formula, data)
super(TestFormulaPandas, cls).setupClass()
class TestFormulaDict(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = dict((k, v.tolist()) for k, v in iteritems(load_pandas().data))
cls.model = ols(longley_formula, data)
super(TestFormulaDict, cls).setupClass()
class TestFormulaRecArray(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load().data
cls.model = ols(longley_formula, data)
super(TestFormulaRecArray, cls).setupClass()
def test_tests():
formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
dta = load_pandas().data
results = ols(formula, dta).fit()
test_formula = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
LC = make_hypotheses_matrices(results, test_formula)
R = LC.coefs
Q = LC.constants
npt.assert_almost_equal(R, [[0, 1, -1, 0, 0, 0, 0],
[0, 0 , 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1./1829]], 8)
npt.assert_array_equal(Q, [[0],[2],[1]])
def test_formula_labels():
# make sure labels pass through patsy as expected
# data(Duncan) from car in R
dta = StringIO(""""type" "income" "education" "prestige"\n"accountant" "prof" 62 86 82\n"pilot" "prof" 72 76 83\n"architect" "prof" 75 92 90\n"author" "prof" 55 90 76\n"chemist" "prof" 64 86 90\n"minister" "prof" 21 84 87\n"professor" "prof" 64 93 93\n"dentist" "prof" 80 100 90\n"reporter" "wc" 67 87 52\n"engineer" "prof" 72 86 88\n"undertaker" "prof" 42 74 57\n"lawyer" "prof" 76 98 89\n"physician" "prof" 76 97 97\n"welfare.worker" "prof" 41 84 59\n"teacher" "prof" 48 91 73\n"conductor" "wc" 76 34 38\n"contractor" "prof" 53 45 76\n"factory.owner" "prof" 60 56 81\n"store.manager" "prof" 42 44 45\n"banker" "prof" 78 82 92\n"bookkeeper" "wc" 29 72 39\n"mail.carrier" "wc" 48 55 34\n"insurance.agent" "wc" 55 71 41\n"store.clerk" "wc" 29 50 16\n"carpenter" "bc" 21 23 33\n"electrician" "bc" 47 39 53\n"RR.engineer" "bc" 81 28 67\n"machinist" "bc" 36 32 57\n"auto.repairman" "bc" 22 22 26\n"plumber" "bc" 44 25 29\n"gas.stn.attendant" "bc" 15 29 10\n"coal.miner" "bc" 7 7 15\n"streetcar.motorman" "bc" 42 26 19\n"taxi.driver" "bc" 9 19 10\n"truck.driver" "bc" 21 15 13\n"machine.operator" "bc" 21 20 24\n"barber" "bc" 16 26 20\n"bartender" "bc" 16 28 7\n"shoe.shiner" "bc" 9 17 3\n"cook" "bc" 14 22 16\n"soda.clerk" "bc" 12 30 6\n"watchman" "bc" 17 25 11\n"janitor" "bc" 7 20 8\n"policeman" "bc" 34 47 41\n"waiter" "bc" 8 32 10""")
from pandas import read_table
dta = read_table(dta, sep=" ")
model = ols("prestige ~ income + education", dta).fit()
assert_equal(model.fittedvalues.index, dta.index)
def test_formula_predict():
from numpy import log
formula = """TOTEMP ~ log(GNPDEFL) + log(GNP) + UNEMP + ARMED +
POP + YEAR"""
data = load_pandas()
dta = load_pandas().data
results = ols(formula, dta).fit()
npt.assert_almost_equal(results.fittedvalues.values,
results.predict(data.exog), 8)
| bsd-3-clause |
phdowling/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
larrybradley/photutils | photutils/psf/groupstars.py | 2 | 9059 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides classes to perform grouping of stars.
"""
import abc
from astropy.table import Column
import numpy as np
__all__ = ['DAOGroup', 'DBSCANGroup', 'GroupStarsBase']
class GroupStarsBase(metaclass=abc.ABCMeta):
"""
This base class provides the basic interface for subclasses that
are capable of classifying stars in groups.
"""
def __call__(self, starlist):
"""
Classify stars into groups.
Parameters
----------
starlist : `~astropy.table.Table`
List of star positions. Columns named as ``x_0`` and
``y_0``, which corresponds to the centroid coordinates of
the sources, must be provided.
Returns
-------
group_starlist : `~astropy.table.Table`
``starlist`` with an additional column named ``group_id``
whose unique values represent groups of mutually overlapping
stars.
"""
return self.group_stars(starlist)
@abc.abstractmethod
def group_stars(self, starlist):
"""
Classify stars into groups.
Parameters
----------
starlist : `~astropy.table.Table`
List of star positions. Columns named as ``x_0`` and
``y_0``, which corresponds to the centroid coordinates of
the sources, must be provided.
Returns
-------
group_starlist : `~astropy.table.Table`
``starlist`` with an additional column named ``group_id``
whose unique values represent groups of mutually overlapping
stars.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
class DAOGroup(GroupStarsBase):
"""
This class implements the DAOGROUP algorithm presented by
Stetson (1987).
The method ``group_stars`` divides an entire starlist into sets of
distinct, self-contained groups of mutually overlapping stars.
It accepts as input a list of stars and determines which stars are close
enough to be capable of adversely influencing each others' profile fits.
Parameters
----------
crit_separation : float or int
Distance, in units of pixels, such that any two stars separated by
less than this distance will be placed in the same group.
Notes
-----
Assuming the psf fwhm to be known, ``crit_separation`` may be set to
k*fwhm, for some positive real k.
See Also
--------
photutils.detection.DAOStarFinder
References
----------
[1] Stetson, Astronomical Society of the Pacific, Publications,
(ISSN 0004-6280), vol. 99, March 1987, p. 191-222.
Available at:
https://ui.adsabs.harvard.edu/abs/1987PASP...99..191S/abstract
"""
def __init__(self, crit_separation):
self.crit_separation = crit_separation
@property
def crit_separation(self):
return self._crit_separation
@crit_separation.setter
def crit_separation(self, crit_separation):
if not isinstance(crit_separation, (float, int)):
            raise ValueError('crit_separation is expected to be either float '
                             f'or int. Received {type(crit_separation)}.')
elif crit_separation < 0.0:
raise ValueError('crit_separation is expected to be a positive '
f'real number. Got {crit_separation}.')
else:
self._crit_separation = crit_separation
def group_stars(self, starlist):
cstarlist = starlist.copy()
if 'id' not in cstarlist.colnames:
cstarlist.add_column(Column(name='id',
data=np.arange(len(cstarlist)) + 1))
cstarlist.add_column(Column(name='group_id',
data=np.zeros(len(cstarlist),
dtype=int)))
if not np.array_equal(cstarlist['id'], np.arange(len(cstarlist)) + 1):
            raise ValueError('id column must be an integer-valued ' +
                             'sequence starting from 1. ' +
                             f"Got {cstarlist['id']}")
n = 1
while (cstarlist['group_id'] == 0).sum() > 0:
init_star = cstarlist[np.where(cstarlist['group_id'] == 0)[0][0]]
index = self.find_group(init_star,
cstarlist[cstarlist['group_id'] == 0])
cstarlist['group_id'][index-1] = n
k = 1
K = len(index)
while k < K:
init_star = cstarlist[cstarlist['id'] == index[k]]
tmp_index = self.find_group(
init_star, cstarlist[cstarlist['group_id'] == 0])
if len(tmp_index) > 0:
cstarlist['group_id'][tmp_index-1] = n
index = np.append(index, tmp_index)
K = len(index)
k += 1
n += 1
return cstarlist
def find_group(self, star, starlist):
"""
Find the ids of those stars in ``starlist`` which are at a
distance less than ``crit_separation`` from ``star``.
Parameters
----------
star : `~astropy.table.Row`
Star which will be either the head of a cluster or an
isolated one.
starlist : `~astropy.table.Table`
List of star positions. Columns named as ``x_0`` and
``y_0``, which corresponds to the centroid coordinates of
the sources, must be provided.
Returns
-------
result : `~numpy.ndarray`
Array containing the ids of those stars which are at a
distance less than ``crit_separation`` from ``star``.
"""
star_distance = np.hypot(star['x_0'] - starlist['x_0'],
star['y_0'] - starlist['y_0'])
distance_criteria = star_distance < self.crit_separation
return np.asarray(starlist[distance_criteria]['id'])
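# Illustrative usage of DAOGroup (a sketch with made-up coordinates, not part of
# the library itself):
#
#     from astropy.table import Table
#     stars = Table(rows=[(0.0, 0.0), (1.0, 1.0), (50.0, 50.0)],
#                   names=('x_0', 'y_0'))
#     grouped = DAOGroup(crit_separation=5.0)(stars)
#     # the two nearby stars share a group_id; the distant star gets its own group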
class DBSCANGroup(GroupStarsBase):
"""
Class to create star groups according to a distance criteria using
the Density-based Spatial Clustering of Applications with Noise
(DBSCAN) from scikit-learn.
Parameters
----------
crit_separation : float or int
Distance, in units of pixels, such that any two stars separated
by less than this distance will be placed in the same group.
min_samples : int, optional (default=1)
Minimum number of stars necessary to form a group.
metric : string or callable (default='euclidean')
The metric to use when calculating distance between each pair of
stars.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used to actually find nearest neighbors.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree.
References
----------
[1] Scikit Learn DBSCAN.
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html#sklearn.cluster.DBSCAN
Notes
-----
* The attribute ``crit_separation`` corresponds to ``eps`` in
`sklearn.cluster.DBSCAN
<https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html#sklearn.cluster.DBSCAN>`_.
* This class provides more general algorithms than
`photutils.psf.DAOGroup`. More precisely,
`photutils.psf.DAOGroup` is a special case of
`photutils.psf.DBSCANGroup` when ``min_samples=1`` and
``metric=euclidean``. Additionally, `photutils.psf.DBSCANGroup`
may be faster than `photutils.psf.DAOGroup`.
"""
def __init__(self, crit_separation, min_samples=1, metric='euclidean',
algorithm='auto', leaf_size=30):
self.crit_separation = crit_separation
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
def group_stars(self, starlist):
from sklearn.cluster import DBSCAN
cstarlist = starlist.copy()
if 'id' not in cstarlist.colnames:
cstarlist.add_column(Column(name='id',
data=np.arange(len(cstarlist)) + 1))
if not np.array_equal(cstarlist['id'], np.arange(len(cstarlist)) + 1):
            raise ValueError('id column must be an integer-valued ' +
                             'sequence starting from 1. ' +
                             f"Got {cstarlist['id']}")
pos_stars = np.transpose((cstarlist['x_0'], cstarlist['y_0']))
dbscan = DBSCAN(eps=self.crit_separation,
min_samples=self.min_samples, metric=self.metric,
algorithm=self.algorithm, leaf_size=self.leaf_size)
cstarlist['group_id'] = (dbscan.fit(pos_stars).labels_ +
np.ones(len(cstarlist), dtype=int))
return cstarlist
| bsd-3-clause |
alexbruy/QGIS | python/plugins/processing/algs/qgis/QGISAlgorithmProvider.py | 1 | 11178 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QGISAlgorithmProvider.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
try:
import matplotlib.pyplot
assert matplotlib # NOQA silence pyflakes
hasMatplotlib = True
except:
hasMatplotlib = False
try:
import shapely
assert shapely # silence pyflakes
hasShapely = True
except:
hasShapely = False
from qgis.PyQt.QtGui import QIcon
from qgis.core import Qgis, QgsWkbTypes
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.script.ScriptUtils import ScriptUtils
from .RegularPoints import RegularPoints
from .SymmetricalDifference import SymmetricalDifference
from .VectorSplit import VectorSplit
from .VectorGrid import VectorGrid
from .RandomExtract import RandomExtract
from .RandomExtractWithinSubsets import RandomExtractWithinSubsets
from .ExtractByLocation import ExtractByLocation
from .PointsInPolygon import PointsInPolygon
from .PointsInPolygonUnique import PointsInPolygonUnique
from .PointsInPolygonWeighted import PointsInPolygonWeighted
from .SumLines import SumLines
from .BasicStatisticsNumbers import BasicStatisticsNumbers
from .BasicStatisticsStrings import BasicStatisticsStrings
from .NearestNeighbourAnalysis import NearestNeighbourAnalysis
from .LinesIntersection import LinesIntersection
from .MeanCoords import MeanCoords
from .PointDistance import PointDistance
from .UniqueValues import UniqueValues
from .ReprojectLayer import ReprojectLayer
from .ExportGeometryInfo import ExportGeometryInfo
from .Centroids import Centroids
from .Delaunay import Delaunay
from .VoronoiPolygons import VoronoiPolygons
from .DensifyGeometries import DensifyGeometries
from .MultipartToSingleparts import MultipartToSingleparts
from .SimplifyGeometries import SimplifyGeometries
from .LinesToPolygons import LinesToPolygons
from .PolygonsToLines import PolygonsToLines
from .SinglePartsToMultiparts import SinglePartsToMultiparts
from .ExtractNodes import ExtractNodes
from .ConvexHull import ConvexHull
from .FixedDistanceBuffer import FixedDistanceBuffer
from .VariableDistanceBuffer import VariableDistanceBuffer
from .Clip import Clip
from .Difference import Difference
from .Dissolve import Dissolve
from .Intersection import Intersection
from .ExtentFromLayer import ExtentFromLayer
from .RandomSelection import RandomSelection
from .RandomSelectionWithinSubsets import RandomSelectionWithinSubsets
from .SelectByLocation import SelectByLocation
from .Union import Union
from .DensifyGeometriesInterval import DensifyGeometriesInterval
from .Eliminate import Eliminate
from .SpatialJoin import SpatialJoin
from .DeleteColumn import DeleteColumn
from .DeleteHoles import DeleteHoles
from .DeleteDuplicateGeometries import DeleteDuplicateGeometries
from .TextToFloat import TextToFloat
from .ExtractByAttribute import ExtractByAttribute
from .SelectByAttribute import SelectByAttribute
from .Grid import Grid
from .Gridify import Gridify
from .HubDistance import HubDistance
from .HubLines import HubLines
from .Merge import Merge
from .GeometryConvert import GeometryConvert
from .ConcaveHull import ConcaveHull
from .RasterLayerStatistics import RasterLayerStatistics
from .StatisticsByCategories import StatisticsByCategories
from .EquivalentNumField import EquivalentNumField
from .AddTableField import AddTableField
from .FieldsCalculator import FieldsCalculator
from .SaveSelectedFeatures import SaveSelectedFeatures
from .Explode import Explode
from .AutoincrementalField import AutoincrementalField
from .FieldPyculator import FieldsPyculator
from .JoinAttributes import JoinAttributes
from .CreateConstantRaster import CreateConstantRaster
from .PointsLayerFromTable import PointsLayerFromTable
from .PointsDisplacement import PointsDisplacement
from .ZonalStatistics import ZonalStatistics
from .PointsFromPolygons import PointsFromPolygons
from .PointsFromLines import PointsFromLines
from .RandomPointsExtent import RandomPointsExtent
from .RandomPointsLayer import RandomPointsLayer
from .RandomPointsPolygonsFixed import RandomPointsPolygonsFixed
from .RandomPointsPolygonsVariable import RandomPointsPolygonsVariable
from .RandomPointsAlongLines import RandomPointsAlongLines
from .PointsToPaths import PointsToPaths
from .PostGISExecuteSQL import PostGISExecuteSQL
from .ImportIntoPostGIS import ImportIntoPostGIS
from .SetVectorStyle import SetVectorStyle
from .SetRasterStyle import SetRasterStyle
from .SelectByExpression import SelectByExpression
from .SelectByAttributeSum import SelectByAttributeSum
from .HypsometricCurves import HypsometricCurves
from .SplitLinesWithLines import SplitLinesWithLines
from .FieldsMapper import FieldsMapper
from .Datasources2Vrt import Datasources2Vrt
from .CheckValidity import CheckValidity
from .OrientedMinimumBoundingBox import OrientedMinimumBoundingBox
from .Smooth import Smooth
from .ReverseLineDirection import ReverseLineDirection
from .SpatialIndex import SpatialIndex
from .DefineProjection import DefineProjection
from .RectanglesOvalsDiamondsVariable import RectanglesOvalsDiamondsVariable
from .RectanglesOvalsDiamondsFixed import RectanglesOvalsDiamondsFixed
from .MergeLines import MergeLines
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class QGISAlgorithmProvider(AlgorithmProvider):
def __init__(self):
AlgorithmProvider.__init__(self)
self._icon = QIcon(os.path.join(pluginPath, 'images', 'qgis.svg'))
self.alglist = [SumLines(), PointsInPolygon(),
PointsInPolygonWeighted(), PointsInPolygonUnique(),
BasicStatisticsStrings(), BasicStatisticsNumbers(),
NearestNeighbourAnalysis(), MeanCoords(),
LinesIntersection(), UniqueValues(), PointDistance(),
ReprojectLayer(), ExportGeometryInfo(), Centroids(),
Delaunay(), VoronoiPolygons(), SimplifyGeometries(),
DensifyGeometries(), DensifyGeometriesInterval(),
MultipartToSingleparts(), SinglePartsToMultiparts(),
PolygonsToLines(), LinesToPolygons(), ExtractNodes(),
Eliminate(), ConvexHull(), FixedDistanceBuffer(),
VariableDistanceBuffer(), Dissolve(), Difference(),
Intersection(), Union(), Clip(), ExtentFromLayer(),
RandomSelection(), RandomSelectionWithinSubsets(),
SelectByLocation(), RandomExtract(), DeleteHoles(),
RandomExtractWithinSubsets(), ExtractByLocation(),
SpatialJoin(), RegularPoints(), SymmetricalDifference(),
VectorSplit(), VectorGrid(), DeleteColumn(),
DeleteDuplicateGeometries(), TextToFloat(),
ExtractByAttribute(), SelectByAttribute(), Grid(),
Gridify(), HubDistance(), HubLines(), Merge(),
GeometryConvert(), AddTableField(), FieldsCalculator(),
SaveSelectedFeatures(), JoinAttributes(),
AutoincrementalField(), Explode(), FieldsPyculator(),
EquivalentNumField(), PointsLayerFromTable(),
StatisticsByCategories(), ConcaveHull(),
RasterLayerStatistics(), PointsDisplacement(),
ZonalStatistics(), PointsFromPolygons(),
PointsFromLines(), RandomPointsExtent(),
RandomPointsLayer(), RandomPointsPolygonsFixed(),
RandomPointsPolygonsVariable(),
RandomPointsAlongLines(), PointsToPaths(),
PostGISExecuteSQL(), ImportIntoPostGIS(),
SetVectorStyle(), SetRasterStyle(),
SelectByExpression(), HypsometricCurves(),
SplitLinesWithLines(), CreateConstantRaster(),
FieldsMapper(), SelectByAttributeSum(), Datasources2Vrt(),
CheckValidity(), OrientedMinimumBoundingBox(), Smooth(),
ReverseLineDirection(), SpatialIndex(), DefineProjection(),
RectanglesOvalsDiamondsVariable(),
RectanglesOvalsDiamondsFixed(), MergeLines()
]
if hasMatplotlib:
from .VectorLayerHistogram import VectorLayerHistogram
from .RasterLayerHistogram import RasterLayerHistogram
from .VectorLayerScatterplot import VectorLayerScatterplot
from .MeanAndStdDevPlot import MeanAndStdDevPlot
from .BarPlot import BarPlot
from .PolarPlot import PolarPlot
self.alglist.extend([
VectorLayerHistogram(), RasterLayerHistogram(),
VectorLayerScatterplot(), MeanAndStdDevPlot(), BarPlot(),
PolarPlot(),
])
if hasShapely:
from .Polygonize import Polygonize
self.alglist.extend([Polygonize()])
if Qgis.QGIS_VERSION_INT >= 21300:
from .ExecuteSQL import ExecuteSQL
self.alglist.extend([ExecuteSQL()])
self.externalAlgs = [] # to store algs added by 3rd party plugins as scripts
folder = os.path.join(os.path.dirname(__file__), 'scripts')
scripts = ScriptUtils.loadFromFolder(folder)
for script in scripts:
script.allowEdit = False
self.alglist.extend(scripts)
for alg in self.alglist:
alg._icon = self._icon
def initializeSettings(self):
AlgorithmProvider.initializeSettings(self)
def unload(self):
AlgorithmProvider.unload(self)
def getName(self):
return 'qgis'
def getDescription(self):
return self.tr('QGIS geoalgorithms')
def getIcon(self):
return self._icon
def _loadAlgorithms(self):
self.algs = list(self.alglist) + self.externalAlgs
def supportsNonFileBasedOutput(self):
return True
| gpl-2.0 |
dmsuehir/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_binary_classification_test.py | 13 | 9995 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" tests multiclass classification metrics"""
import unittest
from sparktkregtests.lib import sparktk_test
class BinaryClassificationMetrics(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Tests binary classification"""
super(BinaryClassificationMetrics, self).setUp()
self.dataset = [("blue", 1, 0, 0),
("blue", 3, 1, 0),
("green", 1, 0, 0),
("green", 0, 1, 0)]
self.schema = [("a", str),
("b", int),
("labels", int),
("predictions", int)]
self.frame = self.context.frame.create(self.dataset,
schema=self.schema)
def test_binary_classification_metrics(self):
"""test binary classification metrics with normal data"""
# call the binary classification metrics function
class_metrics = self.frame.binary_classification_metrics("labels",
"predictions",
1,
1)
# get the confusion matrix values
conf_matrix = class_metrics.confusion_matrix.values
# labeling each of the cells in our confusion matrix
# makes this easier for me to read
# the confusion matrix should look something like this:
# predicted pos predicted neg
# actual pos [0][0] [0][1]
# actual neg [1][0] [1][1]
true_pos = conf_matrix[0][0]
false_neg = conf_matrix[0][1]
false_pos = conf_matrix[1][0]
true_neg = conf_matrix[1][1]
# the total number of predictions, total number pos and neg
total_pos = true_pos + false_neg
total_neg = true_neg + false_pos
total = total_pos + total_neg
# recall is defined in the docs as the total number of true pos
# results divided by the false negatives + pos
recall = true_pos / (false_neg + true_pos)
# from the docs, precision = true pos / false pos + true pos
precision = true_pos / (false_pos + true_pos)
# from the docs this is the def of f_measure
f_measure = (recall * precision) / (recall + precision)
# according to the documentation the accuracy
# is defined as the total correct predictions divided by the
# total number of predictions
accuracy = float(true_pos + true_neg) / float(total)
pos_count = 0
pandas_frame = self.frame.to_pandas()
# calculate the number of pos results and neg results in the data
for index, row in pandas_frame.iterrows():
            if row["labels"] == 1:
pos_count = pos_count + 1
neg_count = total - pos_count
# finally we compare our results with sparktk's
self.assertAlmostEqual(class_metrics.recall, recall)
self.assertAlmostEqual(class_metrics.precision, precision)
self.assertAlmostEqual(class_metrics.f_measure, f_measure)
self.assertAlmostEqual(class_metrics.accuracy, accuracy)
self.assertEqual(total_pos, pos_count)
self.assertEqual(total_neg, neg_count)
def test_binary_classification_metrics_bad_beta(self):
"""Test binary classification metrics with negative beta"""
# should throw an error because beta must be >0
with self.assertRaisesRegexp(Exception, "greater than or equal to 0"):
class_metrics = self.frame.binary_classification_metrics("labels",
"predictions",
1,
beta=-1)
def test_binary_classification_metrics_valid_beta(self):
"""test binary class metrics with a valid value for beta"""
# this is a valid value for beta so this should not throw an error
class_metrics = self.frame.binary_classification_metrics("labels",
"predictions",
1,
beta=2)
def test_binary_classification_matrics_with_invalid_beta_type(self):
"""Test binary class metrics with a beta of invalid type"""
with self.assertRaisesRegexp(Exception, "could not convert string to float"):
class_metrics = self.frame.binary_classification_metrics("labels",
"predictions",
1,
beta="bla")
def test_binary_classification_metrics_with_invalid_pos_label(self):
"""Test binary class metrics with a pos label that does not exist"""
# should not error but should return no pos predictions
class_metrics = self.frame.binary_classification_metrics("labels",
"predictions",
"bla",
1)
# assert that no positive results were found since
# there are no labels in the data with "bla"
conf_matrix = class_metrics.confusion_matrix.values
# assert no predicted pos actual pos
self.assertEqual(conf_matrix[0][0], 0)
        # assert no actual neg predicted pos
self.assertEqual(conf_matrix[1][0], 0)
def test_binary_classification_metrics_with_frequency_col(self):
"""test binay class metrics with a frequency column"""
dataset = [("blue", 1, 0, 0, 1),
("blue", 3, 1, 0, 1),
("green", 1, 0, 0, 3),
("green", 0, 1, 0, 1)]
schema = [("a", str),
("b", int),
("labels", int),
("predictions", int),
("frequency", int)]
frame = self.context.frame.create(dataset, schema=schema)
class_metrics = frame.binary_classification_metrics("labels",
"predictions",
1,
1,
frequency_column="frequency")
conf_matrix = class_metrics.confusion_matrix.values
true_pos = conf_matrix[0][0]
false_neg = conf_matrix[0][1]
false_pos = conf_matrix[1][0]
true_neg = conf_matrix[1][1]
total_pos = true_pos + false_neg
total_neg = true_neg + false_pos
total = total_pos + total_neg
# these calculations use the definitions from the docs
recall = true_pos / (false_neg + true_pos)
precision = true_pos / (false_pos + true_pos)
f_measure = (recall * precision) / (recall + precision)
accuracy = float(true_pos + true_neg) / float(total)
pos_count = 0
        pandas_frame = frame.to_pandas()
# calculate the number of pos results and neg results in the data
for index, row in pandas_frame.iterrows():
if row["labels"] is 1:
pos_count = pos_count + 1
neg_count = total - pos_count
# finally we check that our values match sparktk's
self.assertAlmostEqual(class_metrics.recall, recall)
self.assertAlmostEqual(class_metrics.precision, precision)
self.assertAlmostEqual(class_metrics.f_measure, f_measure)
self.assertAlmostEqual(class_metrics.accuracy, accuracy)
self.assertEqual(total_pos, pos_count)
self.assertEqual(total_neg, neg_count)
def test_binary_classification_metrics_with_invalid_frequency_col(self):
"""test binary class metrics with a frequency col of invalid type"""
dataset = [("blue", 1, 0, 0, "bla"),
("blue", 3, 1, 0, "bla"),
("green", 1, 0, 0, "bla"),
("green", 0, 1, 0, "bla")]
schema = [("a", str),
("b", int),
("labels", int),
("predictions", int),
("frequency", str)]
frame = self.context.frame.create(dataset, schema=schema)
# this should throw an error because the frequency col
# we provided is of type str but should be of type int
with self.assertRaisesRegexp(Exception, "NumberFormatException"):
class_metrics = frame.binary_classification_metrics("labels",
"predictions",
1,
1,
frequency_column="frequency")
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
olologin/scikit-learn | examples/mixture/plot_gmm_pdf.py | 140 | 1521 | """
=========================================
Density Estimation for a Gaussian mixture
=========================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20., 30.)
y = np.linspace(-20., 40.)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
michaelaye/pyciss | pyciss/meta.py | 1 | 2895 | """This module deals with the metadata I have received from collaborators.
It defines the location of ring resonances for the RingCube plotting.
"""
import pandas as pd
import pkg_resources as pr
def get_order(name):
ratio = name.split()[1]
a, b = ratio.split(":")
return int(a) - int(b)
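# e.g. a (made-up) resonance name like "Mimas 5:3" would give an order of 2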
def get_resonances():
with pr.resource_stream("pyciss", "data/ring_resonances.csv") as f:
resonances = pd.read_csv(f)
resonances.columns = ["name", "radius", "a_moon", "n", "kappa"]
resonances = resonances.sort_values(by="radius", ascending=True)
resonances["order"] = resonances.name.map(get_order)
return resonances
def get_prime_resonances():
resonances = get_resonances()
prime_resonances = resonances[resonances.order == 1].drop("order", axis=1)
# filter out Janus and Epimetheus as we have a more precise file for that.
prime_resonances = prime_resonances.loc[
~prime_resonances.name.str.startswith("Janus")
]
prime_resonances = prime_resonances.loc[
~prime_resonances.name.str.startswith("Epimetheus")
]
return prime_resonances
# Janus Epithemeus resonances
def get_janus_epimetheus_resonances():
w = [len(" Janus1"), len(" reson"), len(" Resonance radius R")]
    def get_janus_epi_order(reso):
a, b = reso.split(":")
return int(a) - int(b)
fname = pr.resource_filename("pyciss", "data/ring_janus_epimetheus_resonances.txt")
with open(fname) as f:
jan_epi_resonances = pd.read_fwf(
f, skiprows=15, header=0, widths=w, skipfooter=1
)
# replace column names
jan_epi_resonances.columns = ["moon", "reson", "radius"]
# calculate order from resonance name
jan_epi_resonances["order"] = jan_epi_resonances.reson.map(get_janos_epi_order)
def func(x):
"Remove space from resonce string"
return ":".join(i.strip() for i in x.split(":"))
jan_epi_resonances.reson = jan_epi_resonances.reson.map(func)
# calculate name for axes display
jan_epi_resonances["name"] = (
jan_epi_resonances.moon + " " + jan_epi_resonances.reson
)
return jan_epi_resonances
def get_prime_jan_epi():
jan_epi_resonances = get_janus_epimetheus_resonances()
# remove orders > 1 and drop unrequired columns
prime_jan_epis = jan_epi_resonances[jan_epi_resonances.order == 1]
to_drop = ["order", "moon"]
prime_jan_epis = prime_jan_epis.drop(to_drop, axis=1)
return prime_jan_epis
def get_all_resonances():
prime_resonances = get_prime_resonances()
prime_jan_epis = get_prime_jan_epi()
all_resonances = pd.concat(
[prime_resonances, prime_jan_epis], ignore_index=True, sort=False
)
all_resonances.sort_values(by="radius", inplace=True)
all_resonances["moon"] = all_resonances.name.map(lambda x: x.split()[0].lower())
return all_resonances
| isc |
kylerbrown/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 105 | 22788 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a value error is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
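    # (K.flat[::6] steps along the diagonal of the 5x5 Gram matrix, stride = n_samples + 1 = 6)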
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
sniemi/SamPy | sandbox/src/COSFUVSensitivityAndSAAMapping.py | 1 | 34574 | '''
Adapted from Merle Reinhard's HST script.
http://stevendkay.wordpress.com/2009/10/12/scatter-plots-with-basemap-and-matplotlib/
'''
from mpl_toolkits.basemap import Basemap
from pylab import *
from matplotlib.font_manager import FontProperties
from matplotlib.collections import LineCollection
from matplotlib import cm
import sys
import string, math
import ephem
import time_util
import spst_getopt
import numpy as N
import pylab as P
import datetime as D
def toJulian(data):
'''
Converts to Modified Julian Date.
'''
return ephem.julian_date(data) - 2400000.5
def fromJulian(j):
'''
Converts Modified Julian days to human readable format
@return: human readable date and time
'''
import time
    days = j - 40587 # MJD 40587 corresponds to the Unix epoch, Jan 1 1970
sec = days*86400.0
return time.gmtime(sec)
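# rough round-trip illustration (the date is made up): toJulian('2010/6/27 01:30:00')
# is about 55374.0625 MJD, and fromJulian(55374.0625) maps back to 2010-06-27 01:30 UTC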
def padData(Data, hst_startMJD, hst_stopMJD):
'''
Resamples telemetry data to every second.
Pads missing time stamps with the previous value
if necessary.
    @todo: Remove unnecessary use of a Python list that consumes memory
    @return: structured NumPy array with MJD and COUNTS fields
'''
result = []
delta = int((hst_stopMJD - hst_startMJD) * 24 * 60 * 60) #in seconds
newtime = N.arange(delta + 1) / (24 * 60 * 60.) + hst_startMJD
if hst_startMJD == Data['MJD'][0]:
one_before = 0
else:
one_before = Data[Data['MJD'] <= hst_startMJD].shape[0] - 1
if hst_stopMJD == Data['MJD'][-1]:
one_after = -1
else:
one_after = Data[Data['MJD'] <= hst_stopMJD].shape[0] + 1
before = Data['COUNTS'][one_before]
after = Data['COUNTS'][one_after]
limited = Data[(Data['MJD'] <= hst_stopMJD) & (Data['MJD'] >= hst_startMJD)]
length = limited.shape[0]
i = 0
for x in newtime:
if i < length and i == 0 and x < limited['MJD'][i]:
result.append((x, before))
elif i < length and i > 0 and x < limited['MJD'][i]:
result.append((x, limited['COUNTS'][i-1]))
elif i == length:
result.append((x, after))
else:
result.append((x, limited['COUNTS'][i]))
i += 1
return N.array(result, dtype = [('MJD', N.float64), ('COUNTS', N.float32)])
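# rough illustration (values are made up): two telemetry records three seconds
# apart with counts 5 and 9, padded over that same window, come back as one row
# per second with counts [5, 5, 5, 9] -- the earlier value is carried forward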
def plotCounts(FUVAevents, FUVBevents, FUVAhv, FUVBhv, MAMAevents, MAMAhv, hst_startMJD, hst_stopMJD):
'''
Changes the HV signs.
'''
fig = P.figure()
left, width, height = 0.1, 0.8, 0.3
rect1 = [left, 0.7, width, height]
rect2 = [left, 0.4, width, height]
rect3 = [left, 0.1, width, height]
ax1 = fig.add_axes(rect1) #left, bottom, width, height
ax2 = fig.add_axes(rect2)
ax3 = fig.add_axes(rect3)
#FUVA subplot
# ax1.plot(FUVAevents['MJD'], FUVAevents['COUNTS'], ls = 'steps-', lw = 2, label = 'EVENTS')
# ax1.plot(FUVAhv['MJD'], -FUVAhv['COUNTS'], ls = 'steps-', lw = 2, label = 'High Voltage')
ax1.plot(FUVAevents['MJD'], FUVAevents['COUNTS'], ls = '-', lw = 2, label = 'EVENTS')
ax1.plot(FUVAhv['MJD'], -FUVAhv['COUNTS'], ls = '-', lw = 2, label = 'High Voltage')
#FUVB subplot
# ax2.plot(FUVBevents['MJD'], FUVBevents['COUNTS'], ls = 'steps-', lw = 2, label = 'EVENTS')
# ax2.plot(FUVBhv['MJD'], -FUVBhv['COUNTS'], ls = 'steps-', lw = 2, label = 'High Voltage')
ax2.plot(FUVBevents['MJD'], FUVBevents['COUNTS'], ls = '-', lw = 2, label = 'EVENTS')
ax2.plot(FUVBhv['MJD'], -FUVBhv['COUNTS'], ls = '-', lw = 2, label = 'High Voltage')
#MAM subplot
# ax3.plot(MAMAevents['MJD'], MAMAevents['COUNTS'], ls = 'steps-', lw = 2, label = 'EVENTS')
# ax3.plot(MAMAhv['MJD'], -MAMAhv['COUNTS'], ls = 'steps-', lw = 2, label = 'High Voltage')
ax3.plot(MAMAevents['MJD'], MAMAevents['COUNTS'], ls = '-', lw = 2, label = 'EVENTS')
ax3.plot(MAMAhv['MJD'], -MAMAhv['COUNTS'], ls = '-', lw = 2, label = 'High Voltage')
ax1.annotate('FUVA', xy = (0.1,0.8), xycoords='axes fraction', horizontalalignment='center', verticalalignment='center')
ax2.annotate('FUVB', xy = (0.1,0.8), xycoords='axes fraction', horizontalalignment='center', verticalalignment='center')
ax3.annotate('NUV', xy = (0.1,0.8), xycoords='axes fraction', horizontalalignment='center', verticalalignment='center')
ax1.set_xticklabels([])
ax2.set_xticklabels([])
ax1.set_yscale('log')
ax2.set_yscale('log')
ax3.set_yscale('log')
ax1.set_yticks(ax1.get_yticks()[1:-1])
ax2.set_yticks(ax2.get_yticks()[1:-1])
ax3.set_yticks(ax3.get_yticks()[1:-1])
ax1.set_ylabel('COUNTS')
ax2.set_ylabel('COUNTS')
ax3.set_ylabel('COUNTS')
ax1.set_xlabel('MJD')
ax1.set_xlim(hst_startMJD, hst_stopMJD)
ax2.set_xlim(hst_startMJD, hst_stopMJD)
ax3.set_xlim(hst_startMJD, hst_stopMJD)
ax3.set_ylim(5, 6000)
#P.legend(shadow = True, fancybox = True)
P.show()
def plotHSTtrack(hst_start, hst_stop, tlefile, ephemerisOnline = False):
num_points = int((hst_stop - hst_start)/ephem.minute) + 1
# Read in the HST Two-Line ephemeris
if ephemerisOnline:
import urllib2
        hand = urllib2.urlopen('http://celestrak.com/NORAD/elements/science.txt')
data = hand.readlines()
hand.close()
        HST = [[data[val], data[val+1], data[val+2]] for val, line in enumerate(data) if 'HST' in line]
hst = ephem.readtle(string.strip(HST[0][0]), string.strip(HST[0][1]), string.strip(HST[0][2]))
else:
temp = open(tlefile, 'r').readlines()
hst = ephem.readtle(string.strip(temp[0]), string.strip(temp[1]), string.strip(temp[2]))
cur_time = hst_start
hst_longs = []
hst_lats = []
for i in range(0,num_points):
hst.compute(cur_time)
hst_longs.append(hst.sublong.znorm*180.0/math.pi)
hst_lats.append(hst.sublat*180.0/math.pi)
cur_time = cur_time + ephem.minute
lon_0 = 335
lat_0 = -20
llcrnrlat = -60
llcrnrlon = -100
urcrnrlat = 20
urcrnrlon = 60
# use these values to setup Basemap instance.
width = 14000000
height = 10000000
#m = Basemap(width=width,height=height,\
# resolution='c',projection='aeqd',\
# lat_0=lat_0,lon_0=lon_0)
#m = Basemap(resolution='c',projection='aeqd',lat_0=lat_0,lon_0=lon_0)
#m = Basemap(width=width,height=height,\
# resolution='c',projection='aea',\
# lat_0=lat_0,lon_0=lon_0)
m = Basemap(resolution='c', projection='mbtfpq', lon_0=lon_0)
#m = Basemap(resolution='c',projection='moll',lon_0=lon_0)
#m = Basemap(resolution='c',projection='ortho',lon_0=lon_0,lat_0=lat_0)
#m = Basemap(resolution='c',projection='cyl',llcrnrlat=llcrnrlat,llcrnrlon=llcrnrlon,urcrnrlat=urcrnrlat,urcrnrlon=urcrnrlon)
p = FontProperties()
font1 = p.copy()
font1.set_size('small')
# draw coasts and fill continents.
m.drawcoastlines(linewidth = 0.5)
#m.fillcontinents()
m.drawparallels(arange(-80,81,10),labels=[1,1,0,0],fontproperties=font1,labelstyle="+/-")
m.drawmeridians(arange(-180,180,20),labels=[0,0,0,1],fontproperties=font1,labelstyle="+/-")
m.bluemarble()
m.drawmapboundary()
# SAA 02
x2,y2 = m([357.4-360,357.6-360,356.9-360,355.0-360,352.3-360,348.7-360,342.9-360,336.4-360,324.8-360,303.2-360,292.1-360,289.0-360,285.9-360,283.5-360,282.5-360,282.4-360,282.7-360,357.4-360], \
[-28.3,-26.1,-23.7,-21.2,-18.8,-16.3,-13.0,-10.6, -9.1,-11.9,-14.9,-17.0,-19.1,-21.3,-23.7,-26.0,-28.6,-28.3])
# SAA 03
#x3,y3 = m([294.4-360,301.4-360,350.0-360,358.4-360,335.8-360,304.6-360,295.5-360,279.4-360,282.6-360,294.4-360], \
# [-41.0,-42.8,-30.0,-20.9,-4.9,-4.9,-7.0,-21.9,-32.7,-41.0])
x3,y3 = m([ 20.0, 21.0, 19.0, 7.5,347.0-360,336.4-360,324.8-360,303.2-360,292.1-360,285.9-360,283.5-360,282.5-360,282.4-360,282.7-360, 20.0], \
[-28.3,-27.5,-26.1,-19.8, -9.6, -7.6, -6.0, -7.9,-12.0,-17.1,-20.3,-23.5,-26.0,-28.6,-28.3])
# SAA 04
#x4,y4 = m([335.0-360,345.0-360,349.0-360,346.0-360,330.0-360,314.0-360,310.0-360,303.0-360,310.0-360,325.0-360,335.0-360], \
# [-33.0,-27.0,-24.0,-23.0,-25.0,-30.0,-32.2,-39.0,-40.0,-37.0,-33.0])
x4,y4 = m([ 25.0, 7.0,351.0-360,341.0-360,318.0-360,300.0-360,290.0-360,284.0-360,278.0-360,273.0-360,275.0-360, 25.0], \
[-28.5,-16.0, -6.5, -2.0, 1.0, -3.0, -7.0,-10.0,-15.0,-20.0,-30.0,-28.5])
# SAA 05,23
x5,y5 = m([300.0-360, 45.0, 40.0, 30.0, 10.0, 0.0,341.0-360,318.0-360,300.0-360,283.0-360,273.0-360,275.0-360,300.0-360], \
[-50.0,-30.0,-25.0,-21.0,-15.0,-10.2, -2.0, 1.0, -3.0, -8.0,-20.0,-30.0,-50.0])
# SAA 06
#x6,y6 = m([359.0-360,360.0-360,335.4-360,323.0-360,290.0-360,280.0-360,276.0-360,280.0-360,359.0-360], \
# [-28.0,-20.9,-3.4,-0.0,-7.0,-12.6,-20.9,-30.0,-28.0])
x6,y6 = m([ 20.0, 21.0, 19.0, 7.5,347.0-360,336.4-360,324.8-360,303.2-360,292.1-360,285.9-360,283.5-360,282.5-360,282.4-360,282.7-360, 20.0], \
[-28.3,-27.5,-26.1,-19.8, -9.6, -7.6, -6.0, -7.9,-12.0,-17.1,-20.3,-23.5,-26.0,-28.6,-28.3])
# SAA 07
x7,y7 = m([300.0-360,359.0-360,5.0,341.0-360,318.0-360,300.0-360,283.0-360,273.0-360,275.0-360,300.0-360], \
[-50.0,-41.0,-23.0,-2.0,1.0,-3.0,-8.0,-20.0,-30.0,-50.0])
# SAA 24,25,28,31,32
x24,y24=m([ 20.0, 21.0, 19.0, 7.5,347.0-360,336.4-360,324.8-360,303.2-360,292.1-360,285.9-360,283.5-360,282.5-360,282.4-360,282.7-360, 20.0], \
[-28.3,-27.5,-26.1,-19.8, -9.6, -7.6, -6.0, -7.9,-12.0,-17.1,-20.3,-23.5,-26.0,-28.6,-28.3])
# SAA 26,27,29,30
x26,y26=m([ 25.0, 7.0,351.0-360,341.0-360,318.0-360,300.0-360,290.0-360,284.0-360,278.0-360,273.0-360,275.0-360, 25.0], \
[-28.5,-16.0, -6.5, -2.0, 1.0, -3.0, -7.0,-10.0,-15.0,-20.0,-30.0,-28.5])
# HST observation ground track
xhst,yhst = m(hst_longs, hst_lats)
saa02 = m.plot(x2,y2,marker='D',markersize=4.0,markeredgewidth=0.0,color='turquoise',linestyle='-',label='02')
saa03 = m.plot(x3,y3,marker='v',markersize=4.0,markeredgewidth=0.0,color='white',linestyle='-',label='03')
saa04 = m.plot(x4,y4,marker='^',markersize=4.0,markeredgewidth=0.0,color='orange',linestyle='-',label='04')
saa05 = m.plot(x5,y5,marker='s',markersize=4.0,markeredgewidth=0.0,color='green',linestyle='-',label='05')
saa06 = m.plot(x6,y6,marker='x',markersize=4.0,markeredgewidth=1.0,color='magenta',linestyle='-',label='06')
#saa07 = m.plot(x7,y7,marker='>',markersize=4.0,markeredgewidth=0.0,color='darkorchid',linestyle='-',label='07')
#saa24 = m.plot(x24,y24,marker='x',markersize=4.0,markeredgewidth=1.0,color='green',linestyle='-',label='24')
#saa26 = m.plot(x26,y26,marker='^',markersize=4.0,markeredgewidth=0.0,color='maroon',linestyle='-',label='26')
hst = m.plot(xhst,yhst,marker='+',markersize=4.0,markeredgewidth=1.0,color='red',linestyle='-',linewidth=0.7,label='hst')
#SMN:
#cnts must be sampled similar as xhst and yhst!
#cs = m.contour(xhst,yhst,cnts,15,linewidths=1.5)
hst_label = 'HST once per minute'
font = p.copy()
#font.set_size('xx-small')
font.set_size('small')
leg=legend((saa02,saa03,saa04,saa05,saa06,hst), \
('PASS SAA Level 1 - FGS Guidance & STIS LV', \
'PASS SAA Level 2 - STIS', \
'PASS SAA Level 3 - ACS & WFC3', \
'PASS SAA Level 4 - Astrometry & NICMOS', \
'PASS SAA Level 5 - COS', \
#'07 - GHRS', \
#'24/25/31/32 - STIS CCD/STIS MAMA/COS FUV/COS NUV', \
#'26/27/28/29/30 - WFPC2/ACS CCD/ACS SBC/WFC3 UVIS/WFC3 IR', \
hst_label), \
prop=font,numpoints=2,borderpad=0.3,loc='upper center',borderaxespad=0.0,ncol=2)
leg.get_frame().set_alpha(0.7)
#figlegend((saa02,saa05,saa24,saa26),('02','05','24','26'),'upper right')
# draw the title.
title('HST from %s to %s' % (str(arg1),str(arg2)))
show()
def plotContoursOverHSTTrack(data, hst_start, hst_stop, tlefile, ephemerisOnline = False, SAA_zoomed = False, hold_on = False):
num_points = int((hst_stop - hst_start)/ephem.minute) + 1
# Read in the HST Two-Line ephemeris
if ephemerisOnline:
import urllib2
        hand = urllib2.urlopen('http://celestrak.com/NORAD/elements/science.txt')
        # keep the TLE lines under a separate name so the data argument (the counts) is not clobbered
        tle_lines = hand.readlines()
        hand.close()
        HST = [[tle_lines[val], tle_lines[val+1], tle_lines[val+2]] for val, line in enumerate(tle_lines) if 'HST' in line]
hst = ephem.readtle(string.strip(HST[0][0]), string.strip(HST[0][1]), string.strip(HST[0][2]))
else:
temp = open(tlefile, 'r').readlines()
hst = ephem.readtle(string.strip(temp[0]), string.strip(temp[1]), string.strip(temp[2]))
cur_time = hst_start
hst_longs = []
hst_lats = []
for i in range(0,num_points):
hst.compute(cur_time)
hst_longs.append(hst.sublong.znorm*180.0/math.pi)
hst_lats.append(hst.sublat*180.0/math.pi)
cur_time = cur_time + ephem.minute
hst_longs = N.array(hst_longs)
hst_lats = N.array(hst_lats)
#projection
lon_0 = 335
lat_0 = -20
llcrnrlat = -60
llcrnrlon = -100
urcrnrlat = 20
urcrnrlon = 60
# use these values to setup Basemap instance.
width = 14000000
height = 10000000
#SAA
if SAA_zoomed:
m = Basemap(width = width, height = height, resolution = 'c', projection = 'aeqd', lat_0 = lat_0, lon_0 = lon_0)
sz = 100
else:
m = Basemap(resolution='c', projection='mbtfpq', lon_0 = lon_0)
sz = 35
#OTHER PROJECTIONS
# crashed?
#m = Basemap(resolution='c', projection='aeqd', lat_0 = lat_0, lon_0 = lon_0)
# Full map, good
#m = Basemap(resolution='c', projection='mbtfpq', lon_0 = lon_0)
# Full map, diff projection
#m = Basemap(resolution='c', projection='moll', lon_0 = lon_0)
# Globe, SAA well presented
#m = Basemap(resolution='c', projection='ortho', lon_0 = lon_0, lat_0 = lat_0)
# Square, SAA well presented.
#m = Basemap(resolution='c',projection='cyl',llcrnrlat=llcrnrlat,llcrnrlon=llcrnrlon,urcrnrlat=urcrnrlat,urcrnrlon=urcrnrlon)
p = FontProperties()
font1 = p.copy()
font1.set_size('small')
# draw coasts and fill continents.
m.drawcoastlines(linewidth = 0.5)
#m.fillcontinents()
m.drawparallels(arange(-80,81,10),labels=[1,1,0,0],fontproperties=font1,labelstyle="+/-")
m.drawmeridians(arange(-180,180,20),labels=[0,0,0,1],fontproperties=font1,labelstyle="+/-")
m.bluemarble()
m.drawmapboundary()
# SAA 02
x2,y2 = m([357.4-360,357.6-360,356.9-360,355.0-360,352.3-360,348.7-360,342.9-360,336.4-360,324.8-360,303.2-360,292.1-360,289.0-360,285.9-360,283.5-360,282.5-360,282.4-360,282.7-360,357.4-360], \
[-28.3,-26.1,-23.7,-21.2,-18.8,-16.3,-13.0,-10.6, -9.1,-11.9,-14.9,-17.0,-19.1,-21.3,-23.7,-26.0,-28.6,-28.3])
# SAA 03
x3,y3 = m([ 20.0, 21.0, 19.0, 7.5,347.0-360,336.4-360,324.8-360,303.2-360,292.1-360,285.9-360,283.5-360,282.5-360,282.4-360,282.7-360, 20.0], \
[-28.3,-27.5,-26.1,-19.8, -9.6, -7.6, -6.0, -7.9,-12.0,-17.1,-20.3,-23.5,-26.0,-28.6,-28.3])
# SAA 04
x4,y4 = m([ 25.0, 7.0,351.0-360,341.0-360,318.0-360,300.0-360,290.0-360,284.0-360,278.0-360,273.0-360,275.0-360, 25.0], \
[-28.5,-16.0, -6.5, -2.0, 1.0, -3.0, -7.0,-10.0,-15.0,-20.0,-30.0,-28.5])
# SAA 05,23
x5,y5 = m([300.0-360, 45.0, 40.0, 30.0, 10.0, 0.0,341.0-360,318.0-360,300.0-360,283.0-360,273.0-360,275.0-360,300.0-360], \
[-50.0,-30.0,-25.0,-21.0,-15.0,-10.2, -2.0, 1.0, -3.0, -8.0,-20.0,-30.0,-50.0])
# SAA 06
x6,y6 = m([ 20.0, 21.0, 19.0, 7.5,347.0-360,336.4-360,324.8-360,303.2-360,292.1-360,285.9-360,283.5-360,282.5-360,282.4-360,282.7-360, 20.0], \
[-28.3,-27.5,-26.1,-19.8, -9.6, -7.6, -6.0, -7.9,-12.0,-17.1,-20.3,-23.5,-26.0,-28.6,-28.3])
#SAA
saa02 = m.plot(x2,y2,marker='D',markersize=4.0,markeredgewidth=0.0,color='turquoise',linestyle='-',label='02')
saa03 = m.plot(x3,y3,marker='v',markersize=4.0,markeredgewidth=0.0,color='white',linestyle='-',label='03')
saa04 = m.plot(x4,y4,marker='^',markersize=4.0,markeredgewidth=0.0,color='orange',linestyle='-',label='04')
saa05 = m.plot(x5,y5,marker='s',markersize=4.0,markeredgewidth=0.0,color='green',linestyle='-',label='05')
saa06 = m.plot(x6,y6,marker='x',markersize=4.0,markeredgewidth=1.0,color='magenta',linestyle='-',label='06')
# HST observation ground track
xhst, yhst = m(hst_longs, hst_lats)
#hst = m.plot(xhst,yhst,marker='+',markersize=4.0,markeredgewidth=1.0,color='red',linestyle='-',linewidth=0.7,label='hst')
#scatter plot
if hold_on:
scatter = m.scatter(xhst[:-1], yhst[:-1], s = sz, c = data, cmap = cm.jet, linestyle = 'solid', zorder = 11, hold = 'on')
else:
fig = P.figure(1)
points = N.array([xhst, yhst]).T.reshape(-1, 1, 2)
segments = N.concatenate([points[:-1], points[1:]], axis=1)
ax = P.axes()
lc = LineCollection(segments, cmap = P.get_cmap('jet'), norm = P.Normalize(0, 10000))
lc.set_array(data)
lc.set_linewidth(5)
ax.add_collection(lc)
axcb = fig.colorbar(lc)
axcb.set_label('EVENTS')
#contour plot
#cs = m.contour(xhst, yhst, data, 15, linewidths = 1.5)
#mx = N.max(data)
#cbar = map.colorbar(s, ticks=[N.max(data), mx//2., mx], orientation='vertical')
hst_label = 'HST once per minute'
font = p.copy()
#font.set_size('xx-small')
font.set_size('small')
leg=legend((saa02,saa03,saa04,saa05,saa06,hst), \
('PASS SAA Level 1 - FGS Guidance & STIS LV', \
'PASS SAA Level 2 - STIS', \
'PASS SAA Level 3 - ACS & WFC3', \
'PASS SAA Level 4 - Astrometry & NICMOS', \
'PASS SAA Level 5 - COS', \
hst_label), \
prop=font,numpoints=2,borderpad=0.3,loc='upper center',borderaxespad=0.0,ncol=2)
leg.get_frame().set_alpha(0.7)
# draw the title.
P.title('HST from %s to %s' % (str(arg1),str(arg2)))
if hold_on == False: P.show()
def countEvents(eventsData, hvData, hvlow, hvhigh, sampling):
'''
    Counts EVENTS. Prioritizes eventsData over hvData, i.e.
    time stamps in eventsData carry more weight than
    time stamps in hvData. Sampling is not taken into account
    at this point; it should be added to handle NUV data correctly.
    The algorithm is a little flaky and rather slow, so it may be
    worth rewriting.
'''
results = 0
    #Determine the MJD interval to scan: take the earlier of the two
    #start times and the earlier of the two stop times.
if eventsData['MJD'][0] < hvData['MJD'][0]:
minMJD = eventsData['MJD'][0]
else:
minMJD = hvData['MJD'][0]
if eventsData['MJD'][-1] > hvData['MJD'][-1]:
maxMJD = hvData['MJD'][-1]
else:
maxMJD = eventsData['MJD'][-1]
#newtime vector has a timestamp for each second
delta = int((maxMJD - minMJD) * 24 * 60 * 60) #in seconds
newtime = N.arange(delta + 1) / (24 * 60 * 60.) + minMJD
i = 0
j = 0
length_i = eventsData['MJD'].shape[0]
length_j = hvData['MJD'].shape[0]
#algorithm to count EVENTS from unevenly spaced data
for x in newtime:
if i == 0 and x < eventsData['MJD'][0]:
if j == 0 and x < hvData['MJD'][0]:
if hvData['COUNTS'][0] > hvlow and hvData['COUNTS'][0] < hvhigh:
results += eventsData['COUNTS'][0]
elif x < hvData['MJD'][j] and j > 0:
if hvData['COUNTS'][j-1] > hvlow and hvData['COUNTS'][j-1] < hvhigh:
results += eventsData['COUNTS'][0]
elif x == hvData['MJD'][j]:
if hvData['COUNTS'][j] > hvlow and hvData['COUNTS'][j] < hvhigh:
results += eventsData['COUNTS'][0]
j += 1
else:
j += 1
if j < length_j:
if hvData['COUNTS'][j] > hvlow and hvData['COUNTS'][j] < hvhigh:
results += eventsData['COUNTS'][0]
else:
if hvData['COUNTS'][-1] > hvlow and hvData['COUNTS'][-1] < hvhigh:
results += eventsData['COUNTS'][0]
elif i < length_i and x < eventsData['MJD'][i]:
if j == 0 and x < hvData['MJD'][0]:
if hvData['COUNTS'][0] > hvlow and hvData['COUNTS'][0] < hvhigh:
results += eventsData['COUNTS'][i-1]
elif x < hvData['MJD'][j] and j > 0:
if hvData['COUNTS'][j-1] > hvlow and hvData['COUNTS'][j-1] < hvhigh:
results += eventsData['COUNTS'][i-1]
elif x == hvData['MJD'][j]:
if hvData['COUNTS'][j] > hvlow and hvData['COUNTS'][j] < hvhigh:
results += eventsData['COUNTS'][i-1]
j += 1
else:
j += 1
if j < length_j:
if hvData['COUNTS'][j] > hvlow and hvData['COUNTS'][j] < hvhigh:
results += eventsData['COUNTS'][i-1]
else:
if hvData['COUNTS'][-1] > hvlow and hvData['COUNTS'][-1] < hvhigh:
results += eventsData['COUNTS'][i-1]
elif i < length_i and x == eventsData['MJD'][i]:
if j == 0 and x < hvData['MJD'][0]:
if hvData['COUNTS'][0] > hvlow and hvData['COUNTS'][0] < hvhigh:
results += eventsData['COUNTS'][i]
elif x < hvData['MJD'][j] and j > 0:
if hvData['COUNTS'][j-1] > hvlow and hvData['COUNTS'][j-1] < hvhigh:
results += eventsData['COUNTS'][i]
elif x == hvData['MJD'][j]:
if hvData['COUNTS'][j] > hvlow and hvData['COUNTS'][j] < hvhigh:
results += eventsData['COUNTS'][i]
j += 1
else:
j += 1
if j < length_j:
if hvData['COUNTS'][j] > hvlow and hvData['COUNTS'][j] < hvhigh:
results += eventsData['COUNTS'][i]
else:
if hvData['COUNTS'][-1] > hvlow and hvData['COUNTS'][-1] < hvhigh:
results += eventsData['COUNTS'][i]
i += 1
else:
if j == 0 and x < hvData['MJD'][0]:
if hvData['COUNTS'][0] > hvlow and hvData['COUNTS'][0] < hvhigh:
results += eventsData['COUNTS'][i]
elif x < hvData['MJD'][j] and j > 0:
if hvData['COUNTS'][j-1] > hvlow and hvData['COUNTS'][j-1] < hvhigh:
results += eventsData['COUNTS'][i]
elif x == hvData['MJD'][j]:
if hvData['COUNTS'][j] > hvlow and hvData['COUNTS'][j] < hvhigh:
results += eventsData['COUNTS'][i]
j += 1
else:
j += 1
if j < length_j:
if hvData['COUNTS'][j] > hvlow and hvData['COUNTS'][j] < hvhigh:
results += eventsData['COUNTS'][i]
else:
if hvData['COUNTS'][-1] > hvlow and hvData['COUNTS'][-1] < hvhigh:
results += eventsData['COUNTS'][i]
i += 1
return results
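# A rough, hypothetical vectorized sketch of the same idea (not an exact
# drop-in replacement for countEvents; e.g. it scans the common time window
# rather than the earlier start): pad both channels to one sample per second,
# mask the seconds where the high voltage lies inside the range and sum the
# event counts on those seconds.
def countEventsVectorizedSketch(eventsData, hvData, hvlow, hvhigh):
    start = max(eventsData['MJD'][0], hvData['MJD'][0])
    stop = min(eventsData['MJD'][-1], hvData['MJD'][-1])
    events = padData(eventsData, start, stop)
    hv = padData(hvData, start, stop)
    mask = (hv['COUNTS'] > hvlow) & (hv['COUNTS'] < hvhigh)
    return N.sum(events['COUNTS'][mask])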
def Bin(data, column, to_sample):
    '''
    Sums the given column into consecutive bins of to_sample elements.
    '''
    result = []
    a = 0
    npoints = len(data[column])
    ran = npoints / to_sample
    for i in range(ran):
        if (i+1)*to_sample < npoints:
            result.append(N.sum(data[column][a:(i+1)*to_sample]))
            a = (i+1)*to_sample
        else:
            #last bin: include the remaining points
            result.append(N.sum(data[column][a:]))
    return N.array(result)
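# e.g. Bin(FUVAevents, 'COUNTS', 60) sums the one-second padded counts into
# 60 second bins, as done for the FUV and MAMA channels in the main block below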
if __name__ == '__main__':
FUVsize = 16384*1024.
MAMAsize = 1024*1024.
FUVhvlow = -4135
FUVhvhigh = -3985
MAMAhvlow = -1825
MAMAhvhigh = -1675
TotalEvents = False #True
calculateEvents = False #True
#hard coded values and names
startdate = '2010.178:01:30:00'
stopdate = '2010.178:04:00:00'
telepath = '/smov/cos/housekeeping/Telemetry/' # PATH
FUVAevents = 'LDCEFECA' # FUVA
FUVBevents = 'LDCEFECB' # FUVB
events = 'LMEVENTS' # MAMA
FUVAhv = 'LDCHVMNA'
FUVBhv = 'LDCHVMNB'
cmpv = 'LMMCPV'
tlefile = '/Users/niemi/Desktop/Misc/hst_new.tle'
ephemerisOnline = True
#telemetry data files
#FUV
FUVAcountsfile = telepath + FUVAevents
FUVBcountsfile = telepath + FUVBevents
FUVAhvfile = telepath + FUVAhv
FUVBhvfile = telepath + FUVBhv
#MAMA
lmeventsfile = telepath + events #EVENTS
lmmcpvfile = telepath + cmpv #HV
#Time manipulations
arg1 = time_util.spss_time(startdate)
arg2 = time_util.spss_time(stopdate)
hst_start = ephem.Date(arg1.strftime("%Y/%m/%d %H:%M:%S"))
hst_stop = ephem.Date(arg2.strftime("%Y/%m/%d %H:%M:%S"))
hst_startMJD = toJulian(hst_start)
hst_stopMJD = toJulian(hst_stop)
#read the telemetry data in
#FUV
FUVAeventsData = N.loadtxt(FUVAcountsfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8','f4')})
FUVBeventsData = N.loadtxt(FUVBcountsfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8','f4')})
FUVAhvData = N.loadtxt(FUVAhvfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8', 'f4')})
FUVBhvData = N.loadtxt(FUVBhvfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8', 'f4')})
#MAMA
MAMAeventsData = N.loadtxt(lmeventsfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8','f4')})
MAMAhvData = N.loadtxt(lmmcpvfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8', 'f4')})
if calculateEvents:
FUVA = countEvents(FUVAeventsData, FUVAhvData, FUVhvlow, FUVhvhigh, 10)
print '\nThe total number of events at HV-LOW (%0.f <= HV <= %0.f) for FUVA:' % (FUVhvlow, FUVhvhigh)
print '%.0f' % FUVA
print 'and per pixel'
print '%.0f' % (FUVA / FUVsize)
FUVB = countEvents(FUVBeventsData, FUVBhvData, FUVhvlow, FUVhvhigh, 10)
print '\nThe total number of events at HV-LOW (%0.f <= HV <= %0.f) for FUVB:' % (FUVhvlow, FUVhvhigh)
print '%.0f' % FUVB
print 'and per pixel'
print '%.0f' % (FUVB / FUVsize)
MAMA = countEvents(MAMAeventsData, MAMAhvData, MAMAhvlow, MAMAhvhigh, 60)
print '\nThe total number of events at HV-LOW (%0.f <= HV <= %0.f) for MAMA:' % (MAMAhvlow, MAMAhvhigh)
print '%.0f' % MAMA
print 'and per pixel'
        print '%.0f' % (MAMA / MAMAsize)
#with other limits
FUVhvlow = -14135
FUVhvhigh = -985
MAMAhvlow = -11825
MAMAhvhigh = -675
FUVA = countEvents(FUVAeventsData, FUVAhvData, FUVhvlow, FUVhvhigh, 10)
print '\nThe total number of events at %0.f <= HV <= %0.f for FUVA:' % (FUVhvlow, FUVhvhigh)
print '%.0f' % FUVA
print 'and per pixel'
print '%.0f' % (FUVA / FUVsize)
FUVB = countEvents(FUVBeventsData, FUVBhvData, FUVhvlow, FUVhvhigh, 10)
print '\nThe total number of events at %0.f <= HV <= %0.f for FUVB:' % (FUVhvlow, FUVhvhigh)
print '%.0f' % FUVB
print 'and per pixel'
print '%.0f' % (FUVB / FUVsize)
MAMA = countEvents(MAMAeventsData, MAMAhvData, MAMAhvlow, MAMAhvhigh, 60)
print '\nThe total number of events at %0.f <= HV <= %0.f for MAMA:' % (MAMAhvlow, MAMAhvhigh)
print '%.0f' % MAMA
print 'and per pixel'
        print '%.0f' % (MAMA / MAMAsize)
if TotalEvents:
print '\nThe total number of telemetry measurements (only changes count) for'
print 'FUVA %0.f' % FUVAeventsData.shape[0]
print 'FUVB %0.f' % FUVBeventsData.shape[0]
print 'MAMA %0.f' % MAMAeventsData.shape[0]
#Total counts
minFUVAMJD = N.min(FUVAeventsData['MJD'])
maxFUVAMJD = N.max(FUVAeventsData['MJD'])
paddedFUVAevents = padData(FUVAeventsData, minFUVAMJD, maxFUVAMJD)
sumFUVAevents = N.sum(paddedFUVAevents['COUNTS'])
print '\nThe total event counts in the FUVA telemetry data (between %f and %f MJD)' % (minFUVAMJD, maxFUVAMJD)
print 'between %s and %s' % (D.datetime(*fromJulian(minFUVAMJD)[0:6]).strftime('%A %d, %B, %Y (%H:%M%Z)'),
D.datetime(*fromJulian(maxFUVAMJD)[0:6]).strftime('%A %d, %B, %Y (%H:%M%Z)'))
print '%.0f' % sumFUVAevents
print 'and per pixel'
print '%.0f' % (sumFUVAevents / FUVsize)
minFUVBMJD = N.min(FUVBeventsData['MJD'])
maxFUVBMJD = N.max(FUVBeventsData['MJD'])
paddedFUVBevents = padData(FUVBeventsData, minFUVBMJD, maxFUVBMJD)
sumFUVBevents = N.sum(paddedFUVBevents['COUNTS'])
print '\nThe total event counts in the FUVB telemetry data (between %f and %f MJD)' % (minFUVBMJD, maxFUVBMJD)
print 'between %s and %s' % (D.datetime(*fromJulian(minFUVBMJD)[0:6]).strftime('%A %d, %B, %Y (%H:%M%Z)'),
D.datetime(*fromJulian(maxFUVBMJD)[0:6]).strftime('%A %d, %B, %Y (%H:%M%Z)'))
print '%.0f' % sumFUVBevents
print 'and per pixel'
print '%.0f' % (sumFUVBevents / FUVsize)
minMAMAMJD = N.min(MAMAeventsData['MJD'])
maxMAMAMJD = N.max(MAMAeventsData['MJD'])
paddedMAMAevents = padData(MAMAeventsData, minMAMAMJD, maxMAMAMJD)
sumMAMAevents = N.sum(paddedMAMAevents['COUNTS'])
print '\nThe total event counts in the MAMA telemetry data (between %f and %f MJD)' % (minMAMAMJD, maxMAMAMJD)
print 'between %s and %s' % (D.datetime(*fromJulian(minMAMAMJD)[0:6]).strftime('%A %d, %B, %Y (%H:%M%Z)'),
D.datetime(*fromJulian(maxMAMAMJD)[0:6]).strftime('%A %d, %B, %Y (%H:%M%Z)'))
print '%.0f' % sumMAMAevents
print 'and per pixel'
print '%.0f' % (sumMAMAevents / MAMAsize)
#telemetry between the plotted dates
#FUV
print '\nStart and Stop MJDs:', hst_startMJD, hst_stopMJD
FUVAevents = padData(FUVAeventsData, hst_startMJD, hst_stopMJD)
FUVBevents = padData(FUVBeventsData, hst_startMJD, hst_stopMJD)
FUVAhv = padData(FUVAhvData, hst_startMJD, hst_stopMJD)
FUVBhv = padData(FUVBhvData, hst_startMJD, hst_stopMJD)
print 'Number of FUV Telemetry points measured:', FUVAevents.shape[0]
#MAMA
MAMAevents = padData(MAMAeventsData, hst_startMJD, hst_stopMJD)
MAMAhv = padData(MAMAhvData, hst_startMJD, hst_stopMJD)
print 'Number of MAMA Telemetry points measured:', MAMAevents.shape[0]
#plot counts vs. time
plotCounts(FUVAevents, FUVBevents, FUVAhv, FUVBhv, MAMAevents, MAMAhv, hst_startMJD, hst_stopMJD)
#Bin data to 60 second bins
FUVA60cnts = Bin(FUVAevents, 'COUNTS', 60)
FUVB60cnts = Bin(FUVBevents, 'COUNTS', 60)
MAMA60cnts = Bin(MAMAevents, 'COUNTS', 60)
print 'Total counts for FUVA, B, and MAMA:'
print N.sum(FUVA60cnts), N.sum(FUVB60cnts), N.sum(MAMA60cnts)
#make HST track plot
#print 'Will plot HST ground tracks'
#plotHSTtrack(hst_start, hst_stop, tlefile)
#Overplot counts
print 'Will plot FUV counts'
plotContoursOverHSTTrack(FUVA60cnts+FUVB60cnts, hst_start, hst_stop, tlefile, ephemerisOnline, SAA_zoomed = True)
plotContoursOverHSTTrack(FUVA60cnts+FUVB60cnts, hst_start, hst_stop, tlefile, ephemerisOnline)
print 'Will plot MAMA counts'
plotContoursOverHSTTrack(MAMA60cnts, hst_start, hst_stop, tlefile, ephemerisOnline, SAA_zoomed = True)
plotContoursOverHSTTrack(MAMA60cnts, hst_start, hst_stop, tlefile, ephemerisOnline)
#plot many tracks:
for i in range(11,30):
startdate = '2010.0%i:00:00:00' % i
#stopdate = '2010.0%i:23:59:59' % i
        stopdate = '2010.0%i:06:00:00' % i
arg1 = time_util.spss_time(startdate)
arg2 = time_util.spss_time(stopdate)
hst_start = ephem.Date(arg1.strftime("%Y/%m/%d %H:%M:%S"))
hst_stop = ephem.Date(arg2.strftime("%Y/%m/%d %H:%M:%S"))
hst_startMJD = toJulian(hst_start)
hst_stopMJD = toJulian(hst_stop)
print 'Start and Stop MJDs:', hst_startMJD, hst_stopMJD
#read the telemetry data in
#FUV
FUVAeventsData = N.loadtxt(FUVAcountsfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8','f4')})
FUVBeventsData = N.loadtxt(FUVBcountsfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8','f4')})
FUVAhvData = N.loadtxt(FUVAhvfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8', 'f4')})
FUVBhvData = N.loadtxt(FUVBhvfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8', 'f4')})
#MAMA
MAMAeventsData = N.loadtxt(lmeventsfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8','f4')})
MAMAhvData = N.loadtxt(lmmcpvfile, dtype={'names': ('MJD', 'COUNTS'), 'formats' : ('f8', 'f4')})
#telemetry between the plotted dates
#FUV
FUVAevents = padData(FUVAeventsData, hst_startMJD, hst_stopMJD)
FUVBevents = padData(FUVBeventsData, hst_startMJD, hst_stopMJD)
FUVAhv = padData(FUVAhvData, hst_startMJD, hst_stopMJD)
FUVBhv = padData(FUVBhvData, hst_startMJD, hst_stopMJD)
print 'Number of FUV Telemetry points measured:', FUVAevents.shape[0]
#MAMA
MAMAevents = padData(MAMAeventsData, hst_startMJD, hst_stopMJD)
MAMAhv = padData(MAMAhvData, hst_startMJD, hst_stopMJD)
print 'Number of MAMA Telemetry points measured:', MAMAevents.shape[0]
#plot counts vs time
#plotCounts(FUVAevents, FUVBevents, FUVAhv, FUVBhv, MAMAevents, MAMAhv, hst_startMJD, hst_stopMJD)
FUVA60cnts = Bin(FUVAevents, 'COUNTS', 60)
FUVB60cnts = Bin(FUVBevents, 'COUNTS', 60)
MAMA60cnts = Bin(MAMAevents, 'COUNTS', 60)
print 'Total counts for FUVA, B, and MAMA:'
print N.sum(FUVA60cnts), N.sum(FUVB60cnts), N.sum(MAMA60cnts)
plotContoursOverHSTTrack(FUVA60cnts+FUVB60cnts, hst_start, hst_stop, tlefile, ephemerisOnline, hold_on = True)
P.show()
| bsd-2-clause |
datapythonista/pandas | pandas/tests/frame/methods/test_tz_localize.py | 4 | 2100 | import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestTZLocalize:
# See also:
# test_tz_convert_and_localize in test_tz_convert
def test_tz_localize(self, frame_or_series):
rng = date_range("1/1/2011", periods=100, freq="H")
obj = DataFrame({"a": 1}, index=rng)
if frame_or_series is not DataFrame:
obj = obj["a"]
result = obj.tz_localize("utc")
expected = DataFrame({"a": 1}, rng.tz_localize("UTC"))
if frame_or_series is not DataFrame:
expected = expected["a"]
assert result.index.tz.zone == "UTC"
tm.assert_equal(result, expected)
def test_tz_localize_axis1(self):
rng = date_range("1/1/2011", periods=100, freq="H")
df = DataFrame({"a": 1}, index=rng)
df = df.T
result = df.tz_localize("utc", axis=1)
assert result.columns.tz.zone == "UTC"
expected = DataFrame({"a": 1}, rng.tz_localize("UTC"))
tm.assert_frame_equal(result, expected.T)
def test_tz_localize_naive(self, frame_or_series):
# Can't localize if already tz-aware
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
ts = Series(1, index=rng)
ts = frame_or_series(ts)
with pytest.raises(TypeError, match="Already tz-aware"):
ts.tz_localize("US/Eastern")
@pytest.mark.parametrize("copy", [True, False])
def test_tz_localize_copy_inplace_mutate(self, copy, frame_or_series):
# GH#6326
obj = frame_or_series(
np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=None)
)
orig = obj.copy()
result = obj.tz_localize("UTC", copy=copy)
expected = frame_or_series(
np.arange(0, 5),
index=date_range("20131027", periods=5, freq="1H", tz="UTC"),
)
tm.assert_equal(result, expected)
tm.assert_equal(obj, orig)
assert result.index is not obj.index
assert result is not obj
| bsd-3-clause |
befelix/GPy | GPy/plotting/matplot_dep/defaults.py | 7 | 3838 | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
from matplotlib import cm
from .. import Tango
'''
This file is for defaults for the gpy plot, specific to the plotting library.
Create a kwargs dictionary with the right name for the plotting function
you are implementing. If you do not provide defaults, the default behaviour of
the plotting library will be used.
In the code, always use plotting.gpy_plots.defaults to get the defaults, as
it gives back an empty default when defaults are not defined.
'''
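# Illustrative sketch (not part of GPy): a plotting routine would typically pull
# the matching dict from this module and let user-supplied kwargs override it.
# The helper name below is hypothetical; the empty-dict fallback mirrors the
# behaviour described in the docstring above.
#
#     def _merged_kwargs(default_name, **user_kwargs):
#         merged = dict(globals().get(default_name, {}))  # {} when no default is defined
#         merged.update(user_kwargs)                       # user kwargs take precedence
#         return merged
#
#     # e.g. _merged_kwargs('meanplot_1d', linewidth=3) keeps the default color
#     # but overrides the linewidth.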
# Data plots:
data_1d = dict(lw=1.5, marker='x', color='k')
data_2d = dict(s=35, edgecolors='none', linewidth=0., cmap=cm.get_cmap('hot'), alpha=.5)
inducing_1d = dict(lw=0, s=500, color=Tango.colorsHex['darkRed'])
inducing_2d = dict(s=17, edgecolor='k', linewidth=.4, color='white', alpha=.5, marker='^')
inducing_3d = dict(lw=.3, s=500, color=Tango.colorsHex['darkRed'], edgecolor='k')
xerrorbar = dict(color='k', fmt='none', elinewidth=.5, alpha=.5)
yerrorbar = dict(color=Tango.colorsHex['darkRed'], fmt='none', elinewidth=.5, alpha=.5)
# GP plots:
meanplot_1d = dict(color=Tango.colorsHex['mediumBlue'], linewidth=2)
meanplot_2d = dict(cmap='hot', linewidth=.5)
meanplot_3d = dict(linewidth=0, antialiased=True, cstride=1, rstride=1, cmap='hot', alpha=.3)
samples_1d = dict(color=Tango.colorsHex['mediumBlue'], linewidth=.3)
samples_3d = dict(cmap='hot', alpha=.1, antialiased=True, cstride=1, rstride=1, linewidth=0)
confidence_interval = dict(edgecolor=Tango.colorsHex['darkBlue'], linewidth=.5, color=Tango.colorsHex['lightBlue'],alpha=.2)
density = dict(alpha=.5, color=Tango.colorsHex['lightBlue'])
# GPLVM plots:
data_y_1d = dict(linewidth=0, cmap='RdBu', s=40)
data_y_1d_plot = dict(color='k', linewidth=1.5)
# Kernel plots:
ard = dict(edgecolor='k', linewidth=1.2)
# Input plots:
latent = dict(aspect='auto', cmap='Greys', interpolation='bicubic')
gradient = dict(aspect='auto', cmap='RdBu', interpolation='nearest', alpha=.7)
magnification = dict(aspect='auto', cmap='Greys', interpolation='bicubic')
latent_scatter = dict(s=20, linewidth=.2, edgecolor='k', alpha=.9)
annotation = dict(fontdict=dict(family='sans-serif', weight='light', fontsize=9), zorder=.3, alpha=.7)
| bsd-3-clause |
Lx37/seaborn | doc/sphinxext/plot_generator.py | 38 | 10035 | """
Sphinx plugin to run example scripts and create a gallery page.
Lightly modified from the mpld3 project.
"""
from __future__ import division
import os
import os.path as op
import re
import glob
import token
import tokenize
import shutil
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import image
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. image:: {img_file}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
position: relative;
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure:hover img {{
-webkit-filter: blur(3px);
-moz-filter: blur(3px);
-o-filter: blur(3px);
-ms-filter: blur(3px);
filter: blur(3px);
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure span {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
background: #000;
color: #fff;
visibility: hidden;
opacity: 0;
z-index: 100;
}}
.figure p {{
position: absolute;
top: 45%;
width: 170px;
font-size: 110%;
}}
.figure:hover span {{
visibility: visible;
opacity: .4;
}}
.caption {{
        position: absolute;
width: 180px;
top: 170px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
def create_thumbnail(infile, thumbfile,
width=300, height=300,
cx=0.5, cy=0.5, border=4):
baseout, extout = op.splitext(thumbfile)
im = image.imread(infile)
rows, cols = im.shape[:2]
x0 = int(cx * cols - .5 * width)
y0 = int(cy * rows - .5 * height)
xslice = slice(x0, x0 + width)
yslice = slice(y0, y0 + height)
thumb = im[yslice, xslice]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
dpi = 100
fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
ax.imshow(thumb, aspect='auto', resample=True,
interpolation='bilinear')
fig.savefig(thumbfile, dpi=dpi)
return fig
def indent(s, N=4):
"""indent a string"""
return s.replace('\n', '\n' + N * ' ')
class ExampleGenerator(object):
"""Tools for generating an example page from a file"""
def __init__(self, filename, target_dir):
self.filename = filename
self.target_dir = target_dir
self.thumbloc = .5, .5
self.extract_docstring()
with open(filename, "r") as fid:
self.filetext = fid.read()
outfilename = op.join(target_dir, self.rstfilename)
# Only actually run it if the output RST file doesn't
# exist or it was modified less recently than the example
if (not op.exists(outfilename)
or (op.getmtime(outfilename) < op.getmtime(filename))):
self.exec_file()
else:
print("skipping {0}".format(self.filename))
@property
def dirname(self):
return op.split(self.filename)[0]
@property
def fname(self):
return op.split(self.filename)[1]
@property
def modulename(self):
return op.splitext(self.fname)[0]
@property
def pyfilename(self):
return self.modulename + '.py'
@property
def rstfilename(self):
return self.modulename + ".rst"
@property
def htmlfilename(self):
return self.modulename + '.html'
@property
def pngfilename(self):
pngfile = self.modulename + '.png'
return "_images/" + pngfile
@property
def thumbfilename(self):
pngfile = self.modulename + '_thumb.png'
return pngfile
@property
def sphinxtag(self):
return self.modulename
@property
def pagetitle(self):
return self.docstring.strip().split('\n')[0].strip()
@property
def plotfunc(self):
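        # Heuristic: recover the main seaborn entry point from the example source,
        # e.g. "sns.lmplot(...)" -> "lmplot", "sns.FacetGrid(...)" -> "FacetGrid";
        # the name is only used to caption the thumbnail in the gallery index.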
match = re.search(r"sns\.(.+plot)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+map)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+Grid)\(", self.filetext)
if match:
return match.group(1)
return ""
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(lines.__iter__().next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')
).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
thumbloc = None
for i, line in enumerate(docstring.split("\n")):
m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
if m:
thumbloc = float(m.group(1)), float(m.group(2))
break
if thumbloc is not None:
self.thumbloc = thumbloc
docstring = "\n".join([l for l in docstring.split("\n")
if not l.startswith("_thumb")])
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def exec_file(self):
print("running {0}".format(self.filename))
plt.close('all')
my_globals = {'pl': plt,
'plt': plt}
execfile(self.filename, my_globals)
fig = plt.gcf()
fig.canvas.draw()
pngfile = op.join(self.target_dir, self.pngfilename)
thumbfile = op.join("example_thumbs", self.thumbfilename)
self.html = "<img src=../%s>" % self.pngfilename
fig.savefig(pngfile, dpi=75, bbox_inches="tight")
cx, cy = self.thumbloc
create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)
def toctree_entry(self):
return " ./%s\n\n" % op.splitext(self.htmlfilename)[0]
def contents_entry(self):
return (".. raw:: html\n\n"
" <div class='figure align-center'>\n"
" <a href=./{0}>\n"
" <img src=../_static/{1}>\n"
" <span class='figure-label'>\n"
" <p>{2}</p>\n"
" </span>\n"
" </a>\n"
" </div>\n\n"
"\n\n"
"".format(self.htmlfilename,
self.thumbfilename,
self.plotfunc))
def main(app):
static_dir = op.join(app.builder.srcdir, '_static')
target_dir = op.join(app.builder.srcdir, 'examples')
image_dir = op.join(app.builder.srcdir, 'examples/_images')
thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
source_dir = op.abspath(op.join(app.builder.srcdir,
'..', 'examples'))
if not op.exists(static_dir):
os.makedirs(static_dir)
if not op.exists(target_dir):
os.makedirs(target_dir)
if not op.exists(image_dir):
os.makedirs(image_dir)
if not op.exists(thumb_dir):
os.makedirs(thumb_dir)
if not op.exists(source_dir):
os.makedirs(source_dir)
banner_data = []
toctree = ("\n\n"
".. toctree::\n"
" :hidden:\n\n")
contents = "\n\n"
# Write individual example files
for filename in glob.glob(op.join(source_dir, "*.py")):
ex = ExampleGenerator(filename, target_dir)
banner_data.append({"title": ex.pagetitle,
"url": op.join('examples', ex.htmlfilename),
"thumb": op.join(ex.thumbfilename)})
shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
fname=ex.pyfilename,
img_file=ex.pngfilename)
with open(op.join(target_dir, ex.rstfilename), 'w') as f:
f.write(output)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
if len(banner_data) < 10:
banner_data = (4 * banner_data)[:10]
# write index file
index_file = op.join(target_dir, 'index.rst')
with open(index_file, 'w') as index:
index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
toctree=toctree,
contents=contents))
def setup(app):
app.connect('builder-inited', main)
| bsd-3-clause |
vivekmishra1991/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
It runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
elijah513/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
ujjwalkarn/data-science-from-scratch | code/introduction.py | 48 | 8085 | from __future__ import division
##########################
# #
# FINDING KEY CONNECTORS #
# #
##########################
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" },
{ "id": 10, "name": "Jen" }
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# first give each user an empty list
for user in users:
user["friends"] = []
# and then populate the lists with friendships
for i, j in friendships:
# this works because users[i] is the user whose id is i
users[i]["friends"].append(users[j]) # add i as a friend of j
users[j]["friends"].append(users[i]) # add j as a friend of i
def number_of_friends(user):
"""how many friends does _user_ have?"""
return len(user["friends"]) # length of friend_ids list
total_connections = sum(number_of_friends(user)
for user in users) # 24
num_users = len(users)
avg_connections = total_connections / num_users # 2.4
################################
# #
# DATA SCIENTISTS YOU MAY KNOW #
# #
################################
def friends_of_friend_ids_bad(user):
# "foaf" is short for "friend of a friend"
return [foaf["id"]
for friend in user["friends"] # for each of user's friends
for foaf in friend["friends"]] # get each of _their_ friends
from collections import Counter # not loaded by default
def not_the_same(user, other_user):
"""two users are not the same if they have different ids"""
return user["id"] != other_user["id"]
def not_friends(user, other_user):
"""other_user is not a friend if he's not in user["friends"];
that is, if he's not_the_same as all the people in user["friends"]"""
return all(not_the_same(friend, other_user)
for friend in user["friends"])
def friends_of_friend_ids(user):
return Counter(foaf["id"]
for friend in user["friends"] # for each of my friends
for foaf in friend["friends"] # count *their* friends
if not_the_same(user, foaf) # who aren't me
and not_friends(user, foaf)) # and aren't my friends
print friends_of_friend_ids(users[3]) # Counter({0: 2, 5: 1})
interests = [
(0, "Hadoop"), (0, "Big Data"), (0, "HBase"), (0, "Java"),
(0, "Spark"), (0, "Storm"), (0, "Cassandra"),
(1, "NoSQL"), (1, "MongoDB"), (1, "Cassandra"), (1, "HBase"),
(1, "Postgres"), (2, "Python"), (2, "scikit-learn"), (2, "scipy"),
(2, "numpy"), (2, "statsmodels"), (2, "pandas"), (3, "R"), (3, "Python"),
(3, "statistics"), (3, "regression"), (3, "probability"),
(4, "machine learning"), (4, "regression"), (4, "decision trees"),
(4, "libsvm"), (5, "Python"), (5, "R"), (5, "Java"), (5, "C++"),
(5, "Haskell"), (5, "programming languages"), (6, "statistics"),
(6, "probability"), (6, "mathematics"), (6, "theory"),
(7, "machine learning"), (7, "scikit-learn"), (7, "Mahout"),
(7, "neural networks"), (8, "neural networks"), (8, "deep learning"),
(8, "Big Data"), (8, "artificial intelligence"), (9, "Hadoop"),
(9, "Java"), (9, "MapReduce"), (9, "Big Data")
]
def data_scientists_who_like(target_interest):
return [user_id
for user_id, user_interest in interests
if user_interest == target_interest]
from collections import defaultdict
# keys are interests, values are lists of user_ids with that interest
user_ids_by_interest = defaultdict(list)
for user_id, interest in interests:
user_ids_by_interest[interest].append(user_id)
# keys are user_ids, values are lists of interests for that user_id
interests_by_user_id = defaultdict(list)
for user_id, interest in interests:
interests_by_user_id[user_id].append(interest)
def most_common_interests_with(user_id):
return Counter(interested_user_id
                   for interest in interests_by_user_id[user_id]
                   for interested_user_id in user_ids_by_interest[interest]
if interested_user_id != user_id)
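# Worked example (illustrative): user 0's interests are Hadoop, Big Data, HBase,
# Java, Spark, Storm and Cassandra, so most_common_interests_with(0) comes out as
# Counter({9: 3, 1: 2, 8: 1, 5: 1}) -- user 9 shares Hadoop, Big Data and Java.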
###########################
# #
# SALARIES AND EXPERIENCE #
# #
###########################
salaries_and_tenures = [(83000, 8.7), (88000, 8.1),
(48000, 0.7), (76000, 6),
(69000, 6.5), (76000, 7.5),
(60000, 2.5), (83000, 10),
(48000, 1.9), (63000, 4.2)]
def make_chart_salaries_by_tenure(plt):
tenures = [tenure for salary, tenure in salaries_and_tenures]
salaries = [salary for salary, tenure in salaries_and_tenures]
plt.scatter(tenures, salaries)
plt.xlabel("Years Experience")
plt.ylabel("Salary")
plt.show()
# keys are years
# values are the salaries for each tenure
salary_by_tenure = defaultdict(list)
for salary, tenure in salaries_and_tenures:
salary_by_tenure[tenure].append(salary)
average_salary_by_tenure = {
tenure : sum(salaries) / len(salaries)
for tenure, salaries in salary_by_tenure.items()
}
def tenure_bucket(tenure):
if tenure < 2: return "less than two"
elif tenure < 5: return "between two and five"
else: return "more than five"
salary_by_tenure_bucket = defaultdict(list)
for salary, tenure in salaries_and_tenures:
bucket = tenure_bucket(tenure)
salary_by_tenure_bucket[bucket].append(salary)
average_salary_by_bucket = {
tenure_bucket : sum(salaries) / len(salaries)
for tenure_bucket, salaries in salary_by_tenure_bucket.iteritems()
}
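# Worked example (illustrative): with the data above this comes out to roughly
# {'less than two': 48000.0, 'between two and five': 61500.0, 'more than five': 79166.67}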
#################
# #
# PAID_ACCOUNTS #
# #
#################
def predict_paid_or_unpaid(years_experience):
if years_experience < 3.0: return "paid"
elif years_experience < 8.5: return "unpaid"
else: return "paid"
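# e.g. predict_paid_or_unpaid(1.5) -> "paid", predict_paid_or_unpaid(5.0) -> "unpaid"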
######################
# #
# TOPICS OF INTEREST #
# #
######################
words_and_counts = Counter(word
for user, interest in interests
for word in interest.lower().split())
if __name__ == "__main__":
print
print "######################"
print "#"
print "# FINDING KEY CONNECTORS"
print "#"
print "######################"
print
print "total connections", total_connections
print "number of users", num_users
print "average connections", total_connections / num_users
print
# create a list (user_id, number_of_friends)
num_friends_by_id = [(user["id"], number_of_friends(user))
for user in users]
print "users sorted by number of friends:"
print sorted(num_friends_by_id,
key=lambda (user_id, num_friends): num_friends, # by number of friends
reverse=True) # largest to smallest
print
print "######################"
print "#"
print "# DATA SCIENTISTS YOU MAY KNOW"
print "#"
print "######################"
print
print "friends of friends bad for user 0:", friends_of_friend_ids_bad(users[0])
print "friends of friends for user 3:", friends_of_friend_ids(users[3])
print
print "######################"
print "#"
print "# SALARIES AND TENURES"
print "#"
print "######################"
print
print "average salary by tenure", average_salary_by_tenure
print "average salary by tenure bucket", average_salary_by_bucket
print
print "######################"
print "#"
print "# MOST COMMON WORDS"
print "#"
print "######################"
print
for word, count in words_and_counts.most_common():
if count > 1:
print word, count | unlicense |
yandex-load/volta | volta/core/postloader.py | 1 | 2832 | import logging
import pandas as pd
import json
import yaml
from volta.listeners.uploader.uploader import DataUploader
from volta.core.core import VoltaConfig
from volta.core.config.dynamic_options import DYNAMIC_OPTIONS
logger = logging.getLogger(__name__)
def main():
import argparse
parser = argparse.ArgumentParser(description='volta console post-loader')
parser.add_argument('--debug', dest='debug', action='store_true', default=False)
parser.add_argument('-l', '--logs', action='append', help='Log files list')
parser.add_argument('-c', '--config', dest='config')
args = parser.parse_args()
logging.basicConfig(
level="DEBUG" if args.debug else "INFO",
format='%(asctime)s [%(levelname)s] [Volta Post-loader] %(filename)s:%(lineno)d %(message)s')
config = {}
PACKAGE_SCHEMA_PATH = 'volta.core'
if not args.config:
raise RuntimeError('config should be specified')
if not args.logs:
raise RuntimeError('Empty log list')
with open(args.config, 'r') as cfg_stream:
try:
config = VoltaConfig(yaml.load(cfg_stream), DYNAMIC_OPTIONS, PACKAGE_SCHEMA_PATH)
except Exception:
raise RuntimeError('Config file not in yaml or malformed')
uploader = DataUploader(config)
uploader.create_job()
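    # Expected log layout (hypothetical illustration -- the field values are made up):
    #   {"type": "metrics", "names": ["ts", "value"], "dtypes": {"ts": "float64", "value": "float64"}}
    #   0.001<TAB>120.5
    #   0.002<TAB>119.8
    # i.e. a one-line JSON header describing the columns, followed by tab-separated rows.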
for log in args.logs:
try:
with open(log, 'r') as logname:
meta = json.loads(logname.readline())
except ValueError:
logger.warning('Skipped data file: no json header in logfile %s or json malformed...', log)
logger.debug('Skipped data file: no json header in logfile %s or json malformed', log, exc_info=True)
continue
else:
df = pd.read_csv(log, sep='\t', skiprows=1, names=meta['names'], dtype=meta['dtypes'])
logger.info('Uploading %s, meta type: %s', log, meta['type'])
uploader.put(df, meta['type'])
logger.info('Updating job metadata...')
try:
update_job_data = {
'test_id': config.get_option('core', 'test_id'),
'name': config.get_option('uploader', 'name'),
'dsc': config.get_option('uploader', 'dsc'),
'device_id': config.get_option('uploader', 'device_id'),
'device_model': config.get_option('uploader', 'device_model'),
'device_os': config.get_option('uploader', 'device_os'),
'app': config.get_option('uploader', 'app'),
'ver': config.get_option('uploader', 'ver'),
'meta': config.get_option('uploader', 'meta'),
'task': config.get_option('uploader', 'task'),
}
uploader.update_job(update_job_data)
except Exception:
logger.warning('Exception updating metadata')
uploader.close()
logger.info('Done!')
| mpl-2.0 |
mugizico/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median; for
        all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
# If X is sparse and the metric is "manhattan", store it in a csc
# format is easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
meghana1995/sympy | examples/intermediate/mplot3d.py | 93 | 1252 | #!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x, y, z, rstride=2, cstride=2)
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
| bsd-3-clause |
hlin117/statsmodels | docs/source/conf.py | 27 | 11559 | # -*- coding: utf-8 -*-
#
# statsmodels documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 22 11:17:58 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.pngmath', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'numpy_ext.numpydoc',
'github' # for GitHub links
]
import sphinx
if sphinx.__version__ == '1.1.3':
print ("WARNING: Not building inheritance diagrams on sphinx 1.1.3. "
"See https://github.com/statsmodels/statsmodels/issues/1002")
extensions.remove('sphinx.ext.inheritance_diagram')
# plot_directive is broken on old matplotlib
from matplotlib import __version__ as mpl_version
from distutils.version import LooseVersion
if LooseVersion(mpl_version) < LooseVersion('1.0.1'):
extensions.remove('matplotlib.sphinxext.plot_directive')
extensions.append('numpy_ext.plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'statsmodels'
copyright = u'2009-2013, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
autosummary_generate = True
autoclass_content = 'class'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from statsmodels.version import short_version, full_version
release = short_version
# The full version, including dev tag.
version = full_version
# set inheritance_graph_attrs
# you need graphviz installed to use this
# see: http://sphinx.pocoo.org/ext/inheritance.html
# and graphviz dot documentation http://www.graphviz.org/content/attrs
#NOTE: giving the empty string to size allows graphviz to figure out
# the size
inheritance_graph_attrs = dict(size='""', ratio="compress", fontsize=14,
rankdir="LR")
#inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75,
# color='dodgerblue1', style='filled')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['*/autosummary/class.rst', '*/autosummary/glmfamilies.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
if 'htmlhelp' in sys.argv:
#html_theme = 'statsmodels_htmlhelp' #doesn't look nice yet
html_theme = 'default'
    print('################# using statsmodels_htmlhelp ############')
else:
html_theme = 'statsmodels'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/statsmodels_hybi_banner.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/statsmodels_hybi_favico.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index' : ['indexsidebar.html','searchbox.html','sidelinks.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'statsmodelsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'statsmodels.tex', u'statsmodels Documentation',
u'Josef Perktold, Skipper Seabold', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# pngmath options
# http://sphinx-doc.org/ext/math.html#module-sphinx.ext.pngmath
pngmath_latex_preamble=r'\usepackage[active]{preview}' # + other custom stuff for inline math, such as non-default math fonts etc.
pngmath_use_preview=True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'statsmodels', u'statsmodels Documentation',
[u'Josef Perktold, Skipper Seabold, Jonathan Taylor'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'statsmodels'
epub_author = u'Josef Perktold, Skipper Seabold'
epub_publisher = u'Josef Perktold, Skipper Seabold'
epub_copyright = u'2009-2013, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'numpy' : ('http://docs.scipy.org/doc/numpy/', None),
'python' : ('http://docs.python.org/3.2', None),
'pydagogue' : ('http://matthew-brett.github.io/pydagogue/', None),
'patsy' : ('http://patsy.readthedocs.org/en/latest/', None),
'pandas' : ('http://pandas.pydata.org/pandas-docs/dev/', None),
}
from os.path import dirname, abspath, join
plot_basedir = join(dirname(dirname(os.path.abspath(__file__))), 'source')
# ghissue config
github_project_url = "https://github.com/statsmodels/statsmodels"
# for the examples landing page
import json
example_context = json.load(open('examples/landing.json'))
html_context = {'examples': example_context }
| bsd-3-clause |
cl4rke/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
simon-pepin/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
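# Aside (not part of the original example): when the data truly does not fit in
# memory, the same estimator can be fed chunk by chunk via partial_fit; each chunk
# only needs at least n_components samples. The chunking below is purely illustrative.
ipca_stream = IncrementalPCA(n_components=n_components)
for chunk in np.array_split(X, 10):  # 10 chunks of 15 samples each
    ipca_stream.partial_fit(chunk)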
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
jennyzhang0215/incubator-mxnet | example/kaggle-ndsb1/training_curves.py | 52 | 1879 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
## based on https://github.com/dmlc/mxnet/issues/1302
## Parses the model fit log file and generates a train/val vs epoch plot
import matplotlib.pyplot as plt
import numpy as np
import re
import argparse
parser = argparse.ArgumentParser(description='Parses log file and generates train/val curves')
parser.add_argument('--log-file', type=str,default="log_tr_va",
help='the path of log file')
args = parser.parse_args()
TR_RE = re.compile('.*?]\sTrain-accuracy=([\d\.]+)')
VA_RE = re.compile('.*?]\sValidation-accuracy=([\d\.]+)')
log = open(args.log_file).read()
log_tr = [float(x) for x in TR_RE.findall(log)]
log_va = [float(x) for x in VA_RE.findall(log)]
idx = np.arange(len(log_tr))
plt.figure(figsize=(8, 6))
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.plot(idx, log_tr, 'o', linestyle='-', color="r",
label="Train accuracy")
plt.plot(idx, log_va, 'o', linestyle='-', color="b",
label="Validation accuracy")
plt.legend(loc="best")
plt.xticks(np.arange(min(idx), max(idx)+1, 5))
plt.yticks(np.arange(0, 1, 0.2))
plt.ylim([0,1])
plt.show()
| apache-2.0 |
toobaz/pandas | pandas/tests/indexes/multi/test_sorting.py | 2 | 8261 | import numpy as np
import pytest
from pandas.errors import PerformanceWarning, UnsortedIndexError
import pandas as pd
from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, RangeIndex
import pandas.util.testing as tm
def test_sortlevel(idx):
import random
tuples = list(idx)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining():
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
sorted_idx, _ = mi.sortlevel("A", sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic():
tuples = [
("bar", "one"),
("foo", "two"),
("qux", "two"),
("foo", "one"),
("baz", "two"),
("qux", "one"),
]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sort(indices):
with pytest.raises(TypeError):
indices.sort()
def test_numpy_argsort(idx):
result = np.argsort(idx)
expected = idx.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
defined in pandas/core/indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
if isinstance(idx, (CategoricalIndex, RangeIndex)):
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(idx, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(idx, kind="mergesort")
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(idx, order=("a", "b"))
def test_unsortedindex():
# GH 11897
mi = pd.MultiIndex.from_tuples(
[("z", "a"), ("x", "a"), ("y", "b"), ("x", "b"), ("y", "a"), ("z", "b")],
names=["one", "two"],
)
df = pd.DataFrame([[i, 10 * i] for i in range(6)], index=mi, columns=["one", "two"])
# GH 16734: not sorted, but no real slicing
result = df.loc(axis=0)["z", "a"]
expected = df.iloc[0]
tm.assert_series_equal(result, expected)
with pytest.raises(UnsortedIndexError):
df.loc(axis=0)["z", slice("a")]
df.sort_index(inplace=True)
assert len(df.loc(axis=0)["z", :]) == 2
with pytest.raises(KeyError, match="'q'"):
df.loc(axis=0)["q", :]
def test_unsortedindex_doc_examples():
# http://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex # noqa
dfm = DataFrame(
{"jim": [0, 0, 1, 1], "joe": ["x", "x", "z", "y"], "jolie": np.random.rand(4)}
)
dfm = dfm.set_index(["jim", "joe"])
with tm.assert_produces_warning(PerformanceWarning):
dfm.loc[(1, "z")]
with pytest.raises(UnsortedIndexError):
dfm.loc[(0, "y"):(1, "z")]
assert not dfm.index.is_lexsorted()
assert dfm.index.lexsort_depth == 1
# sort it
dfm = dfm.sort_index()
dfm.loc[(1, "z")]
dfm.loc[(0, "y"):(1, "z")]
assert dfm.index.is_lexsorted()
assert dfm.index.lexsort_depth == 2
def test_reconstruct_sort():
# starts off lexsorted & monotonic
mi = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]])
assert mi.is_lexsorted()
assert mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert recons.is_lexsorted()
assert recons.is_monotonic
assert mi is recons
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# cannot convert to lexsorted
mi = pd.MultiIndex.from_tuples(
[("z", "a"), ("x", "a"), ("y", "b"), ("x", "b"), ("y", "a"), ("z", "b")],
names=["one", "two"],
)
assert not mi.is_lexsorted()
assert not mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert not recons.is_lexsorted()
assert not recons.is_monotonic
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# cannot convert to lexsorted
mi = MultiIndex(
levels=[["b", "d", "a"], [1, 2, 3]],
codes=[[0, 1, 0, 2], [2, 0, 0, 1]],
names=["col1", "col2"],
)
assert not mi.is_lexsorted()
assert not mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert not recons.is_lexsorted()
assert not recons.is_monotonic
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
def test_reconstruct_remove_unused():
# xref to GH 2770
df = DataFrame(
[["deleteMe", 1, 9], ["keepMe", 2, 9], ["keepMeToo", 3, 9]],
columns=["first", "second", "third"],
)
df2 = df.set_index(["first", "second"], drop=False)
df2 = df2[df2["first"] != "deleteMe"]
# removed levels are there
expected = MultiIndex(
levels=[["deleteMe", "keepMe", "keepMeToo"], [1, 2, 3]],
codes=[[1, 2], [1, 2]],
names=["first", "second"],
)
result = df2.index
tm.assert_index_equal(result, expected)
expected = MultiIndex(
levels=[["keepMe", "keepMeToo"], [2, 3]],
codes=[[0, 1], [0, 1]],
names=["first", "second"],
)
result = df2.index.remove_unused_levels()
tm.assert_index_equal(result, expected)
# idempotent
result2 = result.remove_unused_levels()
tm.assert_index_equal(result2, expected)
assert result2.is_(result)
@pytest.mark.parametrize(
"first_type,second_type", [("int64", "int64"), ("datetime64[D]", "str")]
)
def test_remove_unused_levels_large(first_type, second_type):
# GH16556
# because tests should be deterministic (and this test in particular
# checks that levels are removed, which is not the case for every
# random input):
rng = np.random.RandomState(4) # seed is arbitrary value that works
size = 1 << 16
df = DataFrame(
dict(
first=rng.randint(0, 1 << 13, size).astype(first_type),
second=rng.randint(0, 1 << 10, size).astype(second_type),
third=rng.rand(size),
)
)
df = df.groupby(["first", "second"]).sum()
df = df[df.third < 0.1]
result = df.index.remove_unused_levels()
assert len(result.levels[0]) < len(df.index.levels[0])
assert len(result.levels[1]) < len(df.index.levels[1])
assert result.equals(df.index)
expected = df.reset_index().set_index(["first", "second"]).index
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("level0", [["a", "d", "b"], ["a", "d", "b", "unused"]])
@pytest.mark.parametrize(
"level1", [["w", "x", "y", "z"], ["w", "x", "y", "z", "unused"]]
)
def test_remove_unused_nan(level0, level1):
# GH 18417
mi = pd.MultiIndex(
levels=[level0, level1], codes=[[0, 2, -1, 1, -1], [0, 1, 2, 3, 2]]
)
result = mi.remove_unused_levels()
tm.assert_index_equal(result, mi)
for level in 0, 1:
assert "unused" not in result.levels[level]
def test_argsort(idx):
result = idx.argsort()
expected = idx.values.argsort()
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
pravsripad/mne-python | mne/fixes.py | 4 | 37120 | """Compatibility fixes for older versions of libraries
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# originally copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD
from distutils.version import LooseVersion
import functools
import inspect
from math import log
import os
from pathlib import Path
import warnings
import numpy as np
###############################################################################
# Misc
def _median_complex(data, axis):
"""Compute marginal median on complex data safely.
XXX: Can be removed when numpy introduces a fix.
See: https://github.com/scipy/scipy/pull/12676/.
"""
# np.median must be passed real arrays for the desired result
if np.iscomplexobj(data):
data = (np.median(np.real(data), axis=axis)
+ 1j * np.median(np.imag(data), axis=axis))
else:
data = np.median(data, axis=axis)
return data
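# Tiny illustration with made-up values: the real and imaginary medians are
# taken separately, so the result below is exactly 2+5j.
assert _median_complex(np.array([1 + 4j, 2 + 5j, 3 + 6j]), axis=0) == 2 + 5j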
# helpers to get function arguments
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
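# Small illustration on a throwaway lambda: positional/keyword names come back
# as a list, and *args is reported separately when varargs=True.
assert _get_args(lambda a, b, *args: None, varargs=True) == (['a', 'b'], ['args'])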
def _safe_svd(A, **kwargs):
"""Wrapper to get around the SVD did not converge error of death"""
# Intel has a bug with their GESVD driver:
# https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501
# For SciPy 0.18 and up, we can work around it by using
# lapack_driver='gesvd' instead.
from scipy import linalg
if kwargs.get('overwrite_a', False):
raise ValueError('Cannot set overwrite_a=True with this function')
try:
return linalg.svd(A, **kwargs)
except np.linalg.LinAlgError as exp:
from .utils import warn
if 'lapack_driver' in _get_args(linalg.svd):
warn('SVD error (%s), attempting to use GESVD instead of GESDD'
% (exp,))
return linalg.svd(A, lapack_driver='gesvd', **kwargs)
else:
raise
def _csc_matrix_cast(x):
from scipy.sparse import csc_matrix
return csc_matrix(x)
###############################################################################
# Backporting nibabel's read_geometry
def _get_read_geometry():
"""Get the geometry reading function."""
try:
import nibabel as nib
has_nibabel = True
except ImportError:
has_nibabel = False
if has_nibabel:
from nibabel.freesurfer import read_geometry
else:
read_geometry = _read_geometry
return read_geometry
def _read_geometry(filepath, read_metadata=False, read_stamp=False):
"""Backport from nibabel."""
from .surface import _fread3, _fread3_many
volume_info = dict()
TRIANGLE_MAGIC = 16777214
QUAD_MAGIC = 16777215
NEW_QUAD_MAGIC = 16777213
with open(filepath, "rb") as fobj:
magic = _fread3(fobj)
if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file
nvert = _fread3(fobj)
nquad = _fread3(fobj)
(fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div
coords = coords.reshape(-1, 3)
quads = _fread3_many(fobj, nquad * 4)
quads = quads.reshape(nquad, 4)
#
# Face splitting follows
#
faces = np.zeros((2 * nquad, 3), dtype=np.int64)
nface = 0
for quad in quads:
if (quad[0] % 2) == 0:
faces[nface] = quad[0], quad[1], quad[3]
nface += 1
faces[nface] = quad[2], quad[3], quad[1]
nface += 1
else:
faces[nface] = quad[0], quad[1], quad[2]
nface += 1
faces[nface] = quad[0], quad[2], quad[3]
nface += 1
elif magic == TRIANGLE_MAGIC: # Triangle file
create_stamp = fobj.readline().rstrip(b'\n').decode('utf-8')
fobj.readline()
vnum = np.fromfile(fobj, ">i4", 1)[0]
fnum = np.fromfile(fobj, ">i4", 1)[0]
coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
if read_metadata:
volume_info = _read_volume_info(fobj)
else:
raise ValueError("File does not appear to be a Freesurfer surface")
coords = coords.astype(np.float64) # XXX: due to mayavi bug on mac 32bits
ret = (coords, faces)
if read_metadata:
if len(volume_info) == 0:
warnings.warn('No volume information contained in the file')
ret += (volume_info,)
if read_stamp:
ret += (create_stamp,)
return ret
###############################################################################
# Triaging FFT functions to get fast pocketfft (SciPy 1.4)
@functools.lru_cache(None)
def _import_fft(name):
single = False
if not isinstance(name, tuple):
name = (name,)
single = True
try:
from scipy.fft import rfft # noqa analysis:ignore
except ImportError:
from numpy import fft # noqa
else:
from scipy import fft # noqa
out = [getattr(fft, n) for n in name]
if single:
out = out[0]
return out
###############################################################################
# NumPy Generator (NumPy 1.17)
def rng_uniform(rng):
"""Get the unform/randint from the rng."""
# prefer Generator.integers, fall back to RandomState.randint
return getattr(rng, 'integers', getattr(rng, 'randint', None))
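# Illustrative use: both legacy RandomState and the newer Generator expose an
# integer sampler through this helper (the seed below is arbitrary).
assert rng_uniform(np.random.RandomState(0))(0, 10, size=3).shape == (3,)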
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
###############################################################################
# Misc utilities
# get_fdata() requires knowing the dtype ahead of time, so let's triage on our
# own instead
def _get_img_fdata(img):
data = np.asanyarray(img.dataobj)
dtype = np.complex128 if np.iscomplexobj(data) else np.float64
return data.astype(dtype)
def _read_volume_info(fobj):
"""An implementation of nibabel.freesurfer.io._read_volume_info, since old
versions of nibabel (<=2.1.0) don't have it.
"""
volume_info = dict()
head = np.fromfile(fobj, '>i4', 1)
if not np.array_equal(head, [20]): # Read two bytes more
head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])
if not np.array_equal(head, [2, 0, 20]):
warnings.warn("Unknown extension code.")
return volume_info
volume_info['head'] = head
for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']:
pair = fobj.readline().decode('utf-8').split('=')
if pair[0].strip() != key or len(pair) != 2:
raise IOError('Error parsing volume info.')
if key in ('valid', 'filename'):
volume_info[key] = pair[1].strip()
elif key == 'volume':
volume_info[key] = np.array(pair[1].split()).astype(int)
else:
volume_info[key] = np.array(pair[1].split()).astype(float)
# Ignore the rest
return volume_info
def _serialize_volume_info(volume_info):
"""An implementation of nibabel.freesurfer.io._serialize_volume_info, since
old versions of nibabel (<=2.1.0) don't have it."""
keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']
diff = set(volume_info.keys()).difference(keys)
if len(diff) > 0:
raise ValueError('Invalid volume info: %s.' % diff.pop())
strings = list()
for key in keys:
if key == 'head':
if not (np.array_equal(volume_info[key], [20]) or np.array_equal(
volume_info[key], [2, 0, 20])):
warnings.warn("Unknown extension code.")
strings.append(np.array(volume_info[key], dtype='>i4').tobytes())
elif key in ('valid', 'filename'):
val = volume_info[key]
strings.append('{} = {}\n'.format(key, val).encode('utf-8'))
elif key == 'volume':
val = volume_info[key]
strings.append('{} = {} {} {}\n'.format(
key, val[0], val[1], val[2]).encode('utf-8'))
else:
val = volume_info[key]
strings.append('{} = {:0.10g} {:0.10g} {:0.10g}\n'.format(
key.ljust(6), val[0], val[1], val[2]).encode('utf-8'))
return b''.join(strings)
##############################################################################
# adapted from scikit-learn
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
_DEFAULT_TAGS = {
'non_deterministic': False,
'requires_positive_X': False,
'requires_positive_y': False,
'X_types': ['2darray'],
'poor_score': False,
'no_validation': False,
'multioutput': False,
"allow_nan": False,
'stateless': False,
'multilabel': False,
'_skip_test': False,
'_xfail_checks': False,
'multioutput_only': False,
'binary_only': False,
'requires_fit': True,
'preserves_dtype': [np.float64],
'requires_y': False,
'pairwise': False,
}
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn.
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : bool, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Parameters
----------
**params : dict
Parameters.
Returns
-------
inst : instance
The object.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
from sklearn.base import _pprint
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
# __getstate__ and __setstate__ are omitted because they only contain
# conditionals that are not satisfied by our objects (e.g.,
# ``if type(self).__module__.startswith('sklearn.')``.
def _more_tags(self):
return _DEFAULT_TAGS
def _get_tags(self):
collected_tags = {}
for base_class in reversed(inspect.getmro(self.__class__)):
if hasattr(base_class, '_more_tags'):
# need the if because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = base_class._more_tags(self)
collected_tags.update(more_tags)
return collected_tags
# newer sklearn deprecates importing from sklearn.metrics.scoring,
# but older sklearn does not expose check_scoring in sklearn.metrics.
def _get_check_scoring():
try:
from sklearn.metrics import check_scoring # noqa
except ImportError:
from sklearn.metrics.scorer import check_scoring # noqa
return check_scoring
def _check_fit_params(X, fit_params, indices=None):
"""Check and validate the parameters passed during `fit`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data array.
fit_params : dict
Dictionary containing the parameters passed at fit.
indices : array-like of shape (n_samples,), default=None
Indices to be selected if the parameter has the same size as
`X`.
Returns
-------
fit_params_validated : dict
Validated parameters. We ensure that the values support
indexing.
"""
try:
from sklearn.utils.validation import \
_check_fit_params as _sklearn_check_fit_params
return _sklearn_check_fit_params(X, fit_params, indices)
except ImportError:
from sklearn.model_selection import _validation
fit_params_validated = \
{k: _validation._index_param_value(X, v, indices)
for k, v in fit_params.items()}
return fit_params_validated
###############################################################################
# Copied from sklearn to simplify code paths
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
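# Quick illustration on a made-up, already-centered 2x2 dataset: the maximum
# likelihood estimate divides by n_samples (biased), giving unit variances here.
assert np.allclose(empirical_covariance(np.array([[1., -1.], [-1., 1.]])),
                   [[1., -1.], [-1., 1.]])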
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
precision_ : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done according to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
from scipy import linalg
# covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like,
The precision matrix associated to the current covariance object.
"""
from scipy import linalg
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fit the Maximum Likelihood Estimator covariance model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : ndarray | None
Not used, present for API consistency.
Returns
-------
self : object
Returns self.
""" # noqa: E501
# X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Compute the log-likelihood of a Gaussian dataset.
Uses ``self.covariance_`` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution as
the data used in fit (including centering).
y : ndarray | None
Not used, present for API consistency.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
from scipy import linalg
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, observations):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
observations : array-like, shape = [n_observations, n_features]
The observations, the Mahalanobis distances of which we
compute. Observations are assumed to be drawn from the same
distribution as the data used in fit.
Returns
-------
mahalanobis_distance : array, shape = [n_observations,]
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
centered_obs = observations - self.location_
mahalanobis_dist = np.sum(
np.dot(centered_obs, precision) * centered_obs, 1)
return mahalanobis_dist
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + _logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
# sklearn uses np.linalg for this, but ours is more robust to zero eigenvalues
def _logdet(A):
"""Compute the log det of a positive semidefinite matrix."""
from scipy import linalg
vals = linalg.eigvalsh(A)
# avoid negative (numerical errors) or zero (semi-definite matrix) values
tol = vals.max() * vals.size * np.finfo(np.float64).eps
vals = np.where(vals > tol, vals, tol)
return np.sum(np.log(vals))
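# Illustrative check on a small, well-conditioned SPD matrix (made up): the
# clipped-eigenvalue log-determinant agrees with numpy's slogdet.
_A_demo = np.array([[2.0, 0.5], [0.5, 1.0]])
assert np.isclose(_logdet(_A_demo), np.linalg.slogdet(_A_demo)[1])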
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
def _assess_dimension_(spectrum, rank, n_samples, n_features):
from scipy.special import gammaln
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.) -
log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def svd_flip(u, v, u_based_decision=True):
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[np.arange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
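# Rough illustration of the sign convention: negating both SVD factors yields
# the same flipped output, so repeated decompositions of the same data agree.
# The random 5x3 matrix is only an example input.
_rng_demo = np.random.RandomState(0)
_u0, _s0, _vt0 = np.linalg.svd(_rng_demo.randn(5, 3), full_matrices=False)
_u1, _vt1 = svd_flip(_u0.copy(), _vt0.copy())
_u2, _vt2 = svd_flip(-_u0, -_vt0)
assert np.allclose(_u1, _u2) and np.allclose(_vt1, _vt2)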
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
To be cumulatively summed as flat
axis : int, optional
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
"""
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol,
atol=atol, equal_nan=True)):
warnings.warn('cumsum was found to be unstable: '
'its last element does not correspond to sum',
RuntimeWarning)
return out
# This shim can be removed once NumPy 1.19.0+ is required (1.18.4 has sign bug)
def svd(a, hermitian=False):
if hermitian: # faster
s, u = np.linalg.eigh(a)
sgn = np.sign(s)
s = np.abs(s)
sidx = np.argsort(s)[..., ::-1]
sgn = take_along_axis(sgn, sidx, axis=-1)
s = take_along_axis(s, sidx, axis=-1)
u = take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = (u * sgn[..., np.newaxis, :]).swapaxes(-2, -1).conj()
np.abs(s, out=s)
return u, s, vt
else:
return np.linalg.svd(a)
###############################################################################
# NumPy einsum backward compat (allow "optimize" arg and fix 1.14.0 bug)
# XXX eventually we should hand-tune our `einsum` calls given our array sizes!
def einsum(*args, **kwargs):
if 'optimize' not in kwargs:
kwargs['optimize'] = False
return np.einsum(*args, **kwargs)
try:
from numpy import take_along_axis
except ImportError: # NumPy < 1.15
def take_along_axis(arr, indices, axis):
# normalize inputs
if axis is None:
arr = arr.flat
arr_shape = (len(arr),) # flatiter has no .shape
axis = 0
else:
# there is a NumPy function for this, but rather than copy our
# internal uses should be correct, so just normalize quickly
if axis < 0:
axis += arr.ndim
assert 0 <= axis < arr.ndim
arr_shape = arr.shape
# use the fancy index
return arr[_make_along_axis_idx(arr_shape, indices, axis)]
def _make_along_axis_idx(arr_shape, indices, axis):
# compute dimensions to iterate over
if not np.issubdtype(indices.dtype, np.integer):
raise IndexError('`indices` must be an integer array')
if len(arr_shape) != indices.ndim:
raise ValueError(
"`indices` and `arr` must have the same number of dimensions")
shape_ones = (1,) * indices.ndim
dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
# build a fancy index, consisting of orthogonal aranges, with the
# requested index inserted at the right location
fancy_index = []
for dim, n in zip(dest_dims, arr_shape):
if dim is None:
fancy_index.append(indices)
else:
ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
fancy_index.append(np.arange(n).reshape(ind_shape))
return tuple(fancy_index)
###############################################################################
# From nilearn
def _crop_colorbar(cbar, cbar_vmin, cbar_vmax):
"""
Crop a colorbar to show from cbar_vmin to cbar_vmax.
Used when symmetric_cbar=False.
"""
import matplotlib
if (cbar_vmin is None) and (cbar_vmax is None):
return
cbar_tick_locs = cbar.locator.locs
if cbar_vmax is None:
cbar_vmax = cbar_tick_locs.max()
if cbar_vmin is None:
cbar_vmin = cbar_tick_locs.min()
new_tick_locs = np.linspace(cbar_vmin, cbar_vmax,
len(cbar_tick_locs))
# matplotlib >= 3.2.0 no longer normalizes axes between 0 and 1
# See https://matplotlib.org/3.2.1/api/prev_api_changes/api_changes_3.2.0.html
# _outline was removed in
# https://github.com/matplotlib/matplotlib/commit/03a542e875eba091a027046d5ec652daa8be6863
# so we use the code from there
if LooseVersion(matplotlib.__version__) >= LooseVersion("3.2.0"):
cbar.ax.set_ylim(cbar_vmin, cbar_vmax)
X, _ = cbar._mesh()
X = np.array([X[0], X[-1]])
Y = np.array([[cbar_vmin, cbar_vmin], [cbar_vmax, cbar_vmax]])
N = X.shape[0]
ii = [0, 1, N - 2, N - 1, 2 * N - 1, 2 * N - 2, N + 1, N, 0]
x = X.T.reshape(-1)[ii]
y = Y.T.reshape(-1)[ii]
xy = (np.column_stack([y, x])
if cbar.orientation == 'horizontal' else
np.column_stack([x, y]))
cbar.outline.set_xy(xy)
else:
cbar.ax.set_ylim(cbar.norm(cbar_vmin), cbar.norm(cbar_vmax))
outline = cbar.outline.get_xy()
outline[:2, 1] += cbar.norm(cbar_vmin)
outline[2:6, 1] -= (1. - cbar.norm(cbar_vmax))
outline[6:, 1] += cbar.norm(cbar_vmin)
cbar.outline.set_xy(outline)
cbar.set_ticks(new_tick_locs, update_ticks=True)
###############################################################################
# Matplotlib
def _get_status(checks):
"""Deal with old MPL to get check box statuses."""
try:
return list(checks.get_status())
except AttributeError:
return [x[0].get_visible() for x in checks.lines]
###############################################################################
# Numba (optional requirement)
# Here we choose different defaults to speed things up by default
try:
import numba
if LooseVersion(numba.__version__) < LooseVersion('0.40'):
raise ImportError
prange = numba.prange
def jit(nopython=True, nogil=True, fastmath=True, cache=True,
**kwargs): # noqa
return numba.jit(nopython=nopython, nogil=nogil, fastmath=fastmath,
cache=cache, **kwargs)
except ImportError:
has_numba = False
else:
has_numba = (os.getenv('MNE_USE_NUMBA', 'true').lower() == 'true')
if not has_numba:
def jit(**kwargs): # noqa
def _jit(func):
return func
return _jit
prange = range
bincount = np.bincount
mean = np.mean
else:
@jit()
def bincount(x, weights, minlength): # noqa: D103
out = np.zeros(minlength)
for idx, w in zip(x, weights):
out[idx] += w
return out
# fix because Numba does not support axis kwarg for mean
@jit()
def _np_apply_along_axis(func1d, axis, arr):
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@jit()
def mean(array, axis):
return _np_apply_along_axis(np.mean, axis, array)
###############################################################################
# Added in Python 3.7 (remove when we drop support for 3.6)
try:
from contextlib import nullcontext
except ImportError:
from contextlib import contextmanager
@contextmanager
def nullcontext(enter_result=None):
yield enter_result
| bsd-3-clause |
saketkc/gencode_regions | genepred_to_bed.py | 1 | 4715 | #!/usr/bin/env python
"""
Extract first/last exon and CDS from GenePred formatted file
"""
from __future__ import print_function
import re
import pandas
import argparse
import sys
def extract_first_coding_exon(row):
cdsStart = int(row["cdsStart"])
cdsEnd = int(row["cdsEnd"])
strand = row["strand"]
exonStarts = row["exonStarts"]
exonEnds = row["exonEnds"]
## Noncoding
if cdsStart == cdsEnd:
return
exonStarts_all = exonStarts.split(",")
exonEnds_all = exonEnds.split(",")
exonEnds_all = [int(x) for x in exonEnds_all if x]
exonStarts_all = [int(x) for x in exonStarts_all if x]
name = "{}".format(row["name"])
if strand == "+":
for i in range(0, len(exonEnds_all)):
## If the end of exon is greater than the start of CDS
if exonEnds_all[i] >= cdsStart:
name = re.sub(r"\.[0-9]+", "", name)
return "{}\t{}\t{}\t{}\t.\t{}".format(
row["chrom"], cdsStart, exonEnds_all[i], name, strand
)
if strand == "-":
for i in range(len(exonEnds_all) - 1, 0, -1):
if exonStarts_all[i] <= cdsEnd:
name = re.sub(r"\.[0-9]+", "", name)
return "{}\t{}\t{}\t{}\t.\t{}".format(
row["chrom"], exonStarts_all[i], cdsEnd, name, strand
)
def extract_last_coding_exon(row):
cdsStart = int(row["cdsStart"])
cdsEnd = int(row["cdsEnd"])
strand = row["strand"]
exonStarts = row["exonStarts"]
exonEnds = row["exonEnds"]
## Noncoding
if cdsStart == cdsEnd:
return
exonStarts_all = exonStarts.split(",")
exonEnds_all = exonEnds.split(",")
exonEnds_all = [int(x) for x in exonEnds_all if x]
exonStarts_all = [int(x) for x in exonStarts_all if x]
name = "{}".format(row["name"])
if strand == "+":
for i in range(len(exonEnds_all) - 1, 0, -1):
## If the end of exon is greater than the start of CDS
if exonEnds_all[i] <= cdsEnd and exonStarts_all[i] >= cdsStart:
name = re.sub(r"\.[0-9]+", "", name)
return "{}\t{}\t{}\t{}\t.\t{}".format(
row["chrom"], exonStarts_all[i], exonEnds_all[i], name, strand
)
if strand == "-":
for i in range(0, len(exonStarts_all)):
if exonStarts_all[i] >= cdsStart and exonEnds_all[i] <= cdsEnd:
name = re.sub(r"\.[0-9]+", "", name)
return "{}\t{}\t{}\t{}\t.\t{}".format(
row["chrom"], exonStarts_all[i], exonEnds_all[i], name, strand
)
def get_CDS(row):
cdsStart = int(row["cdsStart"])
cdsEnd = int(row["cdsEnd"])
strand = row["strand"]
name = "{}".format(row["name"])
if (-cdsStart + cdsEnd) > 0:
name = re.sub(r"\.[0-9]+", "", name)
return "{}\t{}\t{}\t{}\t.\t{}".format(
row["chrom"], cdsStart, cdsEnd, name, strand
)
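# Rough illustration with a single made-up GenePred-style record; these helpers
# take DataFrame rows, and a Series with the same keys behaves the same way.
# A transcript on chr1 (+ strand) with CDS 150-900 and exons 100-300, 500-1000:
_toy_row = pandas.Series({"name": "TX1.2", "chrom": "chr1", "strand": "+",
                          "cdsStart": 150, "cdsEnd": 900,
                          "exonStarts": "100,500,", "exonEnds": "300,1000,"})
assert get_CDS(_toy_row) == "chr1\t150\t900\tTX1\t.\t+"
assert extract_first_coding_exon(_toy_row) == "chr1\t150\t300\tTX1\t.\t+"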
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--first_exon", help="Output first exon BED", action="store_true"
)
parser.add_argument("--last_exon", help="Output last exon BED", action="store_true")
parser.add_argument("--cds", help="Output CDS BED", action="store_true")
parser.add_argument("genepred", help="Path to GTF")
args = parser.parse_args()
if not (args.first_exon or args.last_exon or args.cds):
sys.stderr.write("Must select one of --first_exon, --last_exon, --cds\n")
sys.exit(1)
if args.first_exon:
fetch_func = extract_first_coding_exon
elif args.last_exon:
fetch_func = extract_last_coding_exon
elif args.cds:
fetch_func = get_CDS
df = pandas.read_table(args.genepred, header=None)
if len(df.columns) == 10:
df.columns = [
"name",
"chrom",
"strand",
"txStart",
"txEnd",
"cdsStart",
"cdsEnd",
"exonCount",
"exonStarts",
"exonEnds",
]
elif len(df.columns) == 15:
df.columns = [
"name",
"chrom",
"strand",
"txStart",
"txEnd",
"cdsStart",
"cdsEnd",
"exonCount",
"exonStarts",
"exonEnds",
"score",
"name2",
"cdsStartStat",
"cdsEndStat",
"exonFrames",
]
else:
raise RuntimeError("Input not in GenePred format")
for index, row in df.iterrows():
record = fetch_func(row)
if record:
print(record)
| bsd-2-clause |
momiah/cvariants_opencv | ensembleInputs.py | 1 | 4269 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 16:32:55 2015
@author: mdmiah
"""
import matplotlib as mpl
mpl.use('Agg') # Needed to work on server
import numpy as np
import random
import sys
import modelInputs
# ---------------------------------- ----------------------------------
def getRowsForImages(u, v, labels, brisks, colors):
X = []
y = []
meta = []
for i in xrange(u*4, (u*4)+4):
for j in xrange(v*4, (v*4)+4):
Xrow, yrow, metarow = modelInputs.getRowForCombination(i, j, labels, brisks, colors)
X.append(Xrow)
y.append(yrow)
meta.append(metarow)
return X, y, meta
def sampleVariant(labels, brisks, colors, start=0, end=None):
if end is None:
end = colors.shape[0]-1
# Choose an image at random
u = random.randint(start, end)
label1 = labels[u][1]
# Make sure minimum number of BRISK features exist
i = u*4 + np.arange(4)
brisk1 = brisks[i,1:]
# If ANY of them have brisk features of lower than the threshold, then skip
if np.sum( np.sum(brisk1,axis=1)<modelInputs.minBriskFeatures ):
return sampleVariant(labels, brisks, colors, start, end)
# Match with a color variant
for m in xrange(20): # Search nearby for a color variant
v = random.randint(u-50, u+50)
v = start if v<start else end if v>end else v
if u==v: # Don't match with itself
continue
label2 = labels[v][1]
# Make sure minimum number of BRISK features exist
j = v*4 + np.arange(4)
brisk2 = brisks[j,1:]
if np.sum( np.sum(brisk2,axis=1)<modelInputs.minBriskFeatures ):
continue
if label1==label2:
return getRowsForImages(u, v, labels, brisks, colors)
# If the randomly chosen image has no color variant,
# sample again
return sampleVariant(labels, brisks, colors)
def sampleNonVariant(labels, brisks, colors, start=0, end=None):
if end is None:
end = colors.shape[0]-1
# Choose an image at random
u = random.randint(start, end)
label1 = labels[u][1]
# Make sure minimum number of BRISK features exist
i = u*4 + np.arange(4)
brisk1 = brisks[i,1:]
# If ANY of them have brisk features of lower than the threshold, then skip
if np.sum( np.sum(brisk1,axis=1)<modelInputs.minBriskFeatures ):
return sampleNonVariant(labels, brisks, colors, start, end)
# Match random non variants
label2 = label1
while label1==label2:
v = random.randint(start, end)
# Make sure minimum number of BRISK features exist
j = v*4 + np.arange(4)
brisk2 = brisks[j,1:]
if np.sum( np.sum(brisk2,axis=1)<modelInputs.minBriskFeatures ):
continue
label2 = labels[v][1]
return getRowsForImages(u, v, labels, brisks, colors)
def save(no_of_pairs = 10000):
labels, brisks, colors = modelInputs.loadHists()
test_start_n = no_of_pairs * modelInputs.train_fraction # Fraction of dataset used in training
test_start_u = np.int((colors.shape[0]-1) * modelInputs.train_fraction) # Fraction of images used in training
with open("Cache/X1.csv", "w") as X_fh, open("Cache/y1.csv", "w") as y_fh, open("Cache/Xmeta1.csv", "w") as meta_fh:
for n in xrange(no_of_pairs):
if n<test_start_n:
start = 0
end = test_start_u-1
else:
start = test_start_u
end = None
X, y, meta = sampleVariant(labels, brisks, colors, start, end)
X2, y2, meta2 = sampleNonVariant(labels, brisks, colors, start, end)
X.extend(X2)
y.extend(y2)
meta.extend(meta2)
np.savetxt(X_fh, X, delimiter=",", fmt="%f")
np.savetxt(y_fh, y, delimiter=",", fmt="%d")
np.savetxt(meta_fh, meta, delimiter=",", fmt="%d")
if (n+1)%1000==0:
percentage_completion = 100.0*np.float(n+1)/no_of_pairs
sys.stdout.write(str(n+1)+" of "+str(no_of_pairs)+" done ("+str(percentage_completion)+"%)\r")
sys.stdout.flush()
print ""
| gpl-2.0 |
mlyundin/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
gechong/XlsxWriter | examples/pandas_datetime.py | 9 | 1758 | ##############################################################################
#
# An example of converting a Pandas dataframe with datetimes to an xlsx file
# with a default datetime and date format using Pandas and XlsxWriter.
#
# Copyright 2013-2015, John McNamara, jmcnamara@cpan.org
#
import pandas as pd
from datetime import datetime, date
# Create a Pandas dataframe from some datetime data.
df = pd.DataFrame({'Date and time': [datetime(2015, 1, 1, 11, 30, 55),
datetime(2015, 1, 2, 1, 20, 33),
datetime(2015, 1, 3, 11, 10 ),
datetime(2015, 1, 4, 16, 45, 35),
datetime(2015, 1, 5, 12, 10, 15)],
'Dates only': [date(2015, 2, 1),
date(2015, 2, 2),
date(2015, 2, 3),
date(2015, 2, 4),
date(2015, 2, 5)],
})
# Create a Pandas Excel writer using XlsxWriter as the engine.
# Also set the default datetime and date formats.
writer = pd.ExcelWriter("pandas_datetime.xlsx",
engine='xlsxwriter',
datetime_format='mmm d yyyy hh:mm:ss',
date_format='mmmm dd yyyy')
# Convert the dataframe to an XlsxWriter Excel object.
df.to_excel(writer, sheet_name='Sheet1')
# Get the xlsxwriter workbook and worksheet objects in order to set the column
# widths, to make the dates clearer.
workbook = writer.book
worksheet = writer.sheets['Sheet1']
worksheet.set_column('B:C', 20)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
| bsd-2-clause |
depet/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 7 | 3089 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from nose.tools import assert_equal
from nose.tools import assert_true
from nose.tools import assert_false
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
v = DictVectorizer(sparse=sparse, dtype=dtype)
X = v.fit_transform(D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(D).A)
else:
assert_array_equal(X, v.transform(D))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
FederatedAI/FATE | python/federatedml/evaluation/metrics/classification_metric.py | 1 | 21454 | import copy
import sys
import numpy as np
import pandas as pd
from scipy.stats import stats
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import average_precision_score
ROUND_NUM = 6
def neg_pos_count(labels: np.ndarray, pos_label: int):
pos_num = ((labels == pos_label) + 0).sum()
neg_num = len(labels) - pos_num
return pos_num, neg_num
def sort_score_and_label(labels: np.ndarray, pred_scores: np.ndarray):
labels = np.array(labels)
pred_scores = np.array(pred_scores)
sort_idx = np.flip(pred_scores.argsort())
sorted_labels = labels[sort_idx]
sorted_scores = pred_scores[sort_idx]
return sorted_labels, sorted_scores
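# Small illustration with made-up inputs: samples come back ordered by
# descending score, with labels reordered to match.
_lab_demo, _score_demo = sort_score_and_label([0, 1, 1], [0.2, 0.8, 0.5])
assert list(_lab_demo) == [1, 1, 0] and list(_score_demo) == [0.8, 0.5, 0.2]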
class ConfusionMatrix(object):
@staticmethod
def compute(sorted_labels: list, sorted_pred_scores: list, score_thresholds: list, ret: list, pos_label=1):
for ret_type in ret:
assert ret_type in ['tp', 'tn', 'fp', 'fn']
sorted_labels = np.array(sorted_labels)
sorted_scores = np.array(sorted_pred_scores)
sorted_labels[sorted_labels != pos_label] = 0
sorted_labels[sorted_labels == pos_label] = 1
score_thresholds = np.array([score_thresholds]).transpose()
pred_labels = (sorted_scores > score_thresholds) + 0
ret_dict = {}
if 'tp' in ret or 'tn' in ret:
match_arr = (pred_labels + sorted_labels)
if 'tp' in ret:
tp_num = (match_arr == 2).sum(axis=-1)
ret_dict['tp'] = tp_num
if 'tn' in ret:
tn_num = (match_arr == 0).sum(axis=-1)
ret_dict['tn'] = tn_num
if 'fp' in ret or 'fn' in ret:
match_arr = (sorted_labels - pred_labels)
if 'fp' in ret:
fp_num = (match_arr == -1).sum(axis=-1)
ret_dict['fp'] = fp_num
if 'fn' in ret:
fn_num = (match_arr == 1).sum(axis=-1)
ret_dict['fn'] = fn_num
return ret_dict
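# Rough usage sketch on made-up labels/scores: thresholds are broadcast against
# the sorted scores, so 'tp'/'fp'/'fn'/'tn' come back as one value per threshold.
_cm_labels, _cm_scores = sort_score_and_label([1, 1, 0, 1, 0, 0],
                                              [0.9, 0.7, 0.6, 0.4, 0.3, 0.1])
_cm_demo = ConfusionMatrix.compute(_cm_labels, _cm_scores,
                                   score_thresholds=[0.5, 0.2],
                                   ret=['tp', 'fp', 'fn', 'tn'])
# At threshold 0.5 the three highest scores are predicted positive: tp=2, fp=1.
assert list(_cm_demo['tp']) == [2, 3] and list(_cm_demo['fp']) == [1, 2]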
class ThresholdCutter(object):
@staticmethod
def cut_by_step(sorted_scores, steps=0.01):
assert isinstance(steps, float) and (0 < steps < 1)
thresholds = list(set(sorted_scores))
thresholds, cuts = ThresholdCutter.__filt_threshold(thresholds, steps)
score_threshold = thresholds
return score_threshold, cuts
@staticmethod
def cut_by_index(sorted_scores):
cuts = np.array([c / 100 for c in range(100)])
data_size = len(sorted_scores)
indexs = [int(data_size * cut) for cut in cuts]
score_threshold = [sorted_scores[idx] for idx in indexs]
return score_threshold, cuts
@staticmethod
def __filt_threshold(thresholds, step):
cuts = list(map(float, np.arange(0, 1, step)))
size = len(list(thresholds))
thresholds.sort(reverse=True)
index_list = [int(size * cut) for cut in cuts]
new_thresholds = [thresholds[idx] for idx in index_list]
return new_thresholds, cuts
@staticmethod
def cut_by_quantile(scores, quantile_list=None, interpolation='nearest', remove_duplicate=True):
if quantile_list is None: # default is 20 intervals
quantile_list = [round(i * 0.05, 3) for i in range(20)] + [1.0]
quantile_val = np.quantile(scores, quantile_list, interpolation=interpolation)
if remove_duplicate:
quantile_val = sorted(list(set(quantile_val)))
else:
quantile_val = sorted(list(quantile_val))
if len(quantile_val) == 1:
quantile_val = [np.min(scores), np.max(scores)]
return quantile_val
class KS(object):
@staticmethod
def compute(labels, pred_scores, pos_label=1):
sorted_labels, sorted_scores = sort_score_and_label(labels, pred_scores)
score_threshold, cuts = ThresholdCutter.cut_by_index(sorted_scores)
confusion_mat = ConfusionMatrix.compute(sorted_labels, sorted_scores, score_threshold, ret=['tp', 'fp'],
pos_label=pos_label)
pos_num, neg_num = neg_pos_count(sorted_labels, pos_label=pos_label)
assert pos_num > 0 and neg_num > 0, "error when computing KS metric, pos sample number and neg sample number " \
"must be larger than 0"
tpr_arr = confusion_mat['tp'] / pos_num
fpr_arr = confusion_mat['fp'] / neg_num
tpr = np.append(tpr_arr, np.array([1.0]))
fpr = np.append(fpr_arr, np.array([1.0]))
cuts = np.append(cuts, np.array([1.0]))
ks_curve = tpr[:-1] - fpr[:-1]
ks_val = np.max(ks_curve)
return ks_val, fpr, tpr, score_threshold, cuts
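# End-to-end sketch on made-up scores: with these six samples the best
# separation (TPR - FPR) is 2/3, reached around a threshold of 0.6.
_ks_demo, _, _, _, _ = KS.compute([1, 1, 0, 1, 0, 0], [0.9, 0.7, 0.6, 0.4, 0.3, 0.1])
assert abs(_ks_demo - 2.0 / 3) < 1e-6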
class BiClassMetric(object):
def __init__(self, cut_method='step', remove_duplicate=False, pos_label=1):
assert cut_method in ['step', 'quantile']
self.cut_method = cut_method
self.remove_duplicate = remove_duplicate # available when cut_method is quantile
self.pos_label = pos_label
def prepare_confusion_mat(self, labels, scores, add_to_end=True, ):
sorted_labels, sorted_scores = sort_score_and_label(labels, scores)
score_threshold, cuts = None, None
if self.cut_method == 'step':
score_threshold, cuts = ThresholdCutter.cut_by_step(sorted_scores, steps=0.01)
if add_to_end:
score_threshold.append(min(score_threshold) - 0.001)
cuts.append(1)
elif self.cut_method == 'quantile':
score_threshold = ThresholdCutter.cut_by_quantile(sorted_scores, remove_duplicate=self.remove_duplicate)
score_threshold = list(np.flip(score_threshold))
confusion_mat = ConfusionMatrix.compute(sorted_labels, sorted_scores, score_threshold,
ret=['tp', 'fp', 'fn', 'tn'], pos_label=self.pos_label)
return confusion_mat, score_threshold, cuts
def compute(self, labels, scores, ):
confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, scores, )
metric_scores = self.compute_metric_from_confusion_mat(confusion_mat)
return list(metric_scores), score_threshold, cuts
def compute_metric_from_confusion_mat(self, *args):
raise NotImplementedError()
class Lift(BiClassMetric):
"""
Compute lift
"""
@staticmethod
def _lift_helper(val):
tp, fp, fn, tn, labels_num = val[0], val[1], val[2], val[3], val[4]
lift_x_type, lift_y_type = [], []
for label_type in ['1', '0']:
if label_type == '0':
tp, tn = tn, tp
fp, fn = fn, fp
if labels_num == 0:
lift_x = 1
denominator = 1
else:
lift_x = (tp + fp) / labels_num
denominator = (tp + fn) / labels_num
if tp + fp == 0:
numerator = 1
else:
numerator = tp / (tp + fp)
if denominator == 0:
lift_y = sys.float_info.max
else:
lift_y = numerator / denominator
lift_x_type.insert(0, lift_x)
lift_y_type.insert(0, lift_y)
return lift_x_type, lift_y_type
def compute(self, labels, pred_scores, pos_label=1):
confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, pred_scores, add_to_end=False, )
lifts_y, lifts_x = self.compute_metric_from_confusion_mat(confusion_mat, len(labels), )
return lifts_y, lifts_x, list(score_threshold)
def compute_metric_from_confusion_mat(self, confusion_mat, labels_len, ):
labels_nums = np.zeros(len(confusion_mat['tp'])) + labels_len
rs = map(self._lift_helper, zip(confusion_mat['tp'], confusion_mat['fp'],
confusion_mat['fn'], confusion_mat['tn'], labels_nums))
rs = list(rs)
lifts_x, lifts_y = [i[0] for i in rs], [i[1] for i in rs]
return lifts_y, lifts_x
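# Interpretation of lift, with illustrative numbers (not from any real run):
# with 100 samples of which 20 are positive, and 8 true positives among the
# 10 highest-scored samples, precision at that threshold is 8/10 = 0.8, the
# base positive rate is 0.2, and the lift is 0.8 / 0.2 = 4.0 (lift_x = 0.1,
# the fraction of samples scored above the threshold).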
class Gain(BiClassMetric):
"""
Compute Gain
"""
@staticmethod
def _gain_helper(val):
tp, fp, fn, tn, num_label = val[0], val[1], val[2], val[3], val[4]
gain_x_type, gain_y_type = [], []
for pos_label in ['1', '0']:
if pos_label == '0':
tp, tn = tn, tp
fp, fn = fn, fp
if num_label == 0:
gain_x = 1
else:
gain_x = float((tp + fp) / num_label)
num_positives = tp + fn
if num_positives == 0:
gain_y = 1
else:
gain_y = float(tp / num_positives)
gain_x_type.insert(0, gain_x)
gain_y_type.insert(0, gain_y)
return gain_x_type, gain_y_type
def compute(self, labels, pred_scores, pos_label=1):
confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, pred_scores, add_to_end=False, )
gain_y, gain_x = self.compute_metric_from_confusion_mat(confusion_mat, len(labels))
return gain_y, gain_x, list(score_threshold)
def compute_metric_from_confusion_mat(self, confusion_mat, labels_len):
labels_nums = np.zeros(len(confusion_mat['tp'])) + labels_len
rs = map(self._gain_helper, zip(confusion_mat['tp'], confusion_mat['fp'],
confusion_mat['fn'], confusion_mat['tn'], labels_nums))
rs = list(rs)
gain_x, gain_y = [i[0] for i in rs], [i[1] for i in rs]
return gain_y, gain_x
class BiClassPrecision(BiClassMetric):
"""
Compute binary classification precision
"""
def compute_metric_from_confusion_mat(self, confusion_mat, formatted=True, impute_val=1.0):
numerator = confusion_mat['tp']
denominator = (confusion_mat['tp'] + confusion_mat['fp'])
zero_indexes = (denominator == 0)
denominator[zero_indexes] = 1
precision_scores = numerator / denominator
precision_scores[zero_indexes] = impute_val # impute_val is for prettifying when drawing pr curves
if formatted:
score_formatted = [[0, i] for i in precision_scores]
return score_formatted
else:
return precision_scores
class MultiClassPrecision(object):
"""
Compute multi-classification precision
"""
def compute(self, labels, pred_scores):
all_labels = list(set(labels).union(set(pred_scores)))
all_labels.sort()
return precision_score(labels, pred_scores, average=None), all_labels
class BiClassRecall(BiClassMetric):
"""
Compute binary classification recall
"""
def compute_metric_from_confusion_mat(self, confusion_mat, formatted=True):
recall_scores = confusion_mat['tp'] / (confusion_mat['tp'] + confusion_mat['fn'])
if formatted:
score_formatted = [[0, i] for i in recall_scores]
return score_formatted
else:
return recall_scores
class MultiClassRecall(object):
"""
Compute multi-classification recall
"""
def compute(self, labels, pred_scores):
all_labels = list(set(labels).union(set(pred_scores)))
all_labels.sort()
return recall_score(labels, pred_scores, average=None), all_labels
class BiClassAccuracy(BiClassMetric):
"""
Compute binary classification accuracy
"""
def compute(self, labels, scores, normalize=True):
confusion_mat, score_threshold, cuts = self.prepare_confusion_mat(labels, scores)
metric_scores = self.compute_metric_from_confusion_mat(confusion_mat, normalize=normalize)
return list(metric_scores), score_threshold[: len(metric_scores)], cuts[: len(metric_scores)]
def compute_metric_from_confusion_mat(self, confusion_mat, normalize=True):
rs = (confusion_mat['tp'] + confusion_mat['tn']) / \
(confusion_mat['tp'] + confusion_mat['tn'] + confusion_mat['fn'] + confusion_mat['fp']) if normalize \
else (confusion_mat['tp'] + confusion_mat['tn'])
return rs[:-1]
class MultiClassAccuracy(object):
"""
Compute multi-classification accuracy
"""
def compute(self, labels, pred_scores, normalize=True):
        return accuracy_score(labels, pred_scores, normalize=normalize)
class FScore(object):
"""
Compute F score from bi-class confusion mat
"""
@staticmethod
def compute(labels, pred_scores, beta=1, pos_label=1):
sorted_labels, sorted_scores = sort_score_and_label(labels, pred_scores)
score_threshold, cuts = ThresholdCutter.cut_by_step(sorted_scores, steps=0.01)
score_threshold.append(0)
confusion_mat = ConfusionMatrix.compute(sorted_labels, sorted_scores,
score_threshold,
ret=['tp', 'fp', 'fn', 'tn'], pos_label=pos_label)
precision_computer = BiClassPrecision()
recall_computer = BiClassRecall()
p_score = precision_computer.compute_metric_from_confusion_mat(confusion_mat, formatted=False)
r_score = recall_computer.compute_metric_from_confusion_mat(confusion_mat, formatted=False)
beta_2 = beta * beta
denominator = (beta_2 * p_score + r_score)
denominator[denominator == 0] = 1e-6 # in case denominator is 0
numerator = (1 + beta_2) * (p_score * r_score)
f_score = numerator / denominator
return f_score, score_threshold, cuts
class PSI(object):
def compute(self, train_scores: list, validate_scores: list, train_labels=None, validate_labels=None,
debug=False, str_intervals=False, round_num=3, pos_label=1):
"""
train/validate scores: predicted scores on train/validate set
train/validate labels: true labels
debug: print debug message
if train&validate labels are not None, count positive sample percentage in every interval
pos_label: pos label
round_num: round number
str_intervals: return str intervals
"""
train_scores = np.array(train_scores)
validate_scores = np.array(validate_scores)
quantile_points = ThresholdCutter().cut_by_quantile(train_scores)
train_count = self.quantile_binning_and_count(train_scores, quantile_points)
validate_count = self.quantile_binning_and_count(validate_scores, quantile_points)
train_pos_perc, validate_pos_perc = None, None
if train_labels is not None and validate_labels is not None:
assert len(train_labels) == len(train_scores) and len(validate_labels) == len(validate_scores)
train_labels, validate_labels = np.array(train_labels), np.array(validate_labels)
train_pos_count = self.quantile_binning_and_count(train_scores[train_labels == pos_label], quantile_points)
validate_pos_count = self.quantile_binning_and_count(validate_scores[validate_labels == pos_label],
quantile_points)
train_pos_perc = np.array(train_pos_count['count']) / np.array(train_count['count'])
validate_pos_perc = np.array(validate_pos_count['count']) / np.array(validate_count['count'])
# handle special cases
train_pos_perc[train_pos_perc == np.inf] = -1
validate_pos_perc[validate_pos_perc == np.inf] = -1
train_pos_perc[np.isnan(train_pos_perc)] = 0
validate_pos_perc[np.isnan(validate_pos_perc)] = 0
if debug:
print(train_count)
print(validate_count)
assert (train_count['interval'] == validate_count['interval']), 'train count interval is not equal to ' \
'validate count interval'
expected_interval = np.array(train_count['count'])
actual_interval = np.array(validate_count['count'])
        expected_interval = expected_interval.astype(np.float64)
        actual_interval = actual_interval.astype(np.float64)
psi_scores, total_psi, expected_interval, actual_interval, expected_percentage, actual_percentage \
= self.psi_score(expected_interval, actual_interval, len(train_scores), len(validate_scores))
intervals = train_count['interval'] if not str_intervals else PSI.intervals_to_str(train_count['interval'],
round_num=round_num)
if train_labels is None and validate_labels is None:
return psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, actual_percentage, \
intervals
else:
return psi_scores, total_psi, expected_interval, expected_percentage, actual_interval, actual_percentage, \
train_pos_perc, validate_pos_perc, intervals
@staticmethod
def quantile_binning_and_count(scores, quantile_points):
"""
left edge and right edge of last interval are closed
"""
assert len(quantile_points) >= 2
left_bounds = copy.deepcopy(quantile_points[:-1])
right_bounds = copy.deepcopy(quantile_points[1:])
last_interval_left = left_bounds.pop()
last_interval_right = right_bounds.pop()
bin_result_1, bin_result_2 = None, None
if len(left_bounds) != 0 and len(right_bounds) != 0:
bin_result_1 = pd.cut(scores, pd.IntervalIndex.from_arrays(left_bounds, right_bounds, closed='left'))
bin_result_2 = pd.cut(scores, pd.IntervalIndex.from_arrays([last_interval_left], [last_interval_right],
closed='both'))
count1 = None if bin_result_1 is None else bin_result_1.value_counts().reset_index()
count2 = bin_result_2.value_counts().reset_index()
# if predict scores are the same, count1 will be None, only one interval exists
final_interval = list(count1['index']) + list(count2['index']) if count1 is not None else list(count2['index'])
final_count = list(count1[0]) + list(count2[0]) if count1 is not None else list(count2[0])
rs = {'interval': final_interval, 'count': final_count}
return rs
@staticmethod
def interval_psi_score(val):
expected, actual = val[0], val[1]
return (actual - expected) * np.log(actual / expected)
@staticmethod
def intervals_to_str(intervals, round_num=3):
str_intervals = []
for interval in intervals:
left_bound, right_bound = '[', ']'
if interval.closed == 'left':
right_bound = ')'
elif interval.closed == 'right':
left_bound = '('
str_intervals.append("{}{}, {}{}".format(left_bound, round(interval.left, round_num),
round(interval.right, round_num), right_bound))
return str_intervals
@staticmethod
def psi_score(expected_interval: np.ndarray, actual_interval: np.ndarray, expect_total_num, actual_total_num,
debug=False):
expected_interval[expected_interval == 0] = 1e-6 # in case no overlap samples
actual_interval[actual_interval == 0] = 1e-6 # in case no overlap samples
expected_percentage = expected_interval / expect_total_num
actual_percentage = actual_interval / actual_total_num
if debug:
print(expected_interval)
print(actual_interval)
print(expected_percentage)
print(actual_percentage)
psi_scores = list(map(PSI.interval_psi_score, zip(expected_percentage, actual_percentage)))
psi_scores = np.array(psi_scores)
total_psi = psi_scores.sum()
return psi_scores, total_psi, expected_interval, actual_interval, expected_percentage, actual_percentage
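# Worked example of the per-bin term computed by interval_psi_score: if a bin
# holds 10% of the expected (train) scores but 20% of the actual (validate)
# scores, its contribution is (0.20 - 0.10) * ln(0.20 / 0.10) ~= 0.069, and
# total_psi is the sum of these terms over all quantile bins.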
class KSTest(object):
@staticmethod
def compute(train_scores, validate_scores):
"""
train/validate scores: predicted scores on train/validate set
"""
return stats.ks_2samp(train_scores, validate_scores).pvalue
class AveragePrecisionScore(object):
@staticmethod
def compute(train_scores, validate_scores, train_labels, validate_labels):
"""
train/validate scores: predicted scores on train/validate set
train/validate labels: true labels
"""
train_mAP = average_precision_score(train_labels, train_scores)
validate_mAP = average_precision_score(validate_labels, validate_scores)
return abs(train_mAP - validate_mAP)
class Distribution(object):
@staticmethod
def compute(train_scores: list, validate_scores: list):
"""
train/validate scores: predicted scores on train/validate set
"""
train_scores = np.array(train_scores)
validate_scores = np.array(validate_scores)
validate_scores = dict(validate_scores)
count = 0
for key, value in train_scores:
if key in validate_scores.keys() and value != validate_scores.get(key):
count += 1
return count / len(train_scores)
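# Minimal smoke-test sketch with made-up labels/scores (KS additionally relies
# on the sort_score_and_label / ConfusionMatrix helpers defined earlier in
# this module):
if __name__ == '__main__':
    demo_labels = [1, 1, 0, 1, 0, 0, 1, 0]
    demo_scores = [0.9, 0.8, 0.7, 0.6, 0.4, 0.3, 0.2, 0.1]
    print("demo KS value:", KS.compute(demo_labels, demo_scores)[0])
    # identical train/validate score distributions should give a PSI near 0
    print("demo total PSI:", PSI().compute(demo_scores, demo_scores)[1])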
| apache-2.0 |
justincassidy/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
geopandas/geopandas | geopandas/tools/tests/test_tools.py | 2 | 2954 | from distutils.version import LooseVersion
from shapely.geometry import LineString, MultiPoint, Point
import pyproj
from pyproj import CRS
from geopandas import GeoSeries
from geopandas.tools import collect
from geopandas.tools.crs import epsg_from_crs, explicit_crs_from_epsg
import pytest
# pyproj 2.3.1 fixed a segfault for the case working in an environment with
# 'init' dicts (https://github.com/pyproj4/pyproj/issues/415)
PYPROJ_LT_231 = LooseVersion(pyproj.__version__) < LooseVersion("2.3.1")
class TestTools:
def setup_method(self):
self.p1 = Point(0, 0)
self.p2 = Point(1, 1)
self.p3 = Point(2, 2)
self.mpc = MultiPoint([self.p1, self.p2, self.p3])
self.mp1 = MultiPoint([self.p1, self.p2])
self.line1 = LineString([(3, 3), (4, 4)])
def test_collect_single(self):
result = collect(self.p1)
assert self.p1.equals(result)
def test_collect_single_force_multi(self):
result = collect(self.p1, multi=True)
expected = MultiPoint([self.p1])
assert expected.equals(result)
def test_collect_multi(self):
result = collect(self.mp1)
assert self.mp1.equals(result)
def test_collect_multi_force_multi(self):
result = collect(self.mp1)
assert self.mp1.equals(result)
def test_collect_list(self):
result = collect([self.p1, self.p2, self.p3])
assert self.mpc.equals(result)
def test_collect_GeoSeries(self):
s = GeoSeries([self.p1, self.p2, self.p3])
result = collect(s)
assert self.mpc.equals(result)
def test_collect_mixed_types(self):
with pytest.raises(ValueError):
collect([self.p1, self.line1])
def test_collect_mixed_multi(self):
with pytest.raises(ValueError):
collect([self.mpc, self.mp1])
@pytest.mark.skipif(PYPROJ_LT_231, reason="segfault")
def test_epsg_from_crs(self):
with pytest.warns(FutureWarning):
assert epsg_from_crs({"init": "epsg:4326"}) == 4326
assert epsg_from_crs({"init": "EPSG:4326"}) == 4326
assert epsg_from_crs("+init=epsg:4326") == 4326
@pytest.mark.skipif(PYPROJ_LT_231, reason="segfault")
def test_explicit_crs_from_epsg(self):
with pytest.warns(FutureWarning):
assert explicit_crs_from_epsg(epsg=4326) == CRS.from_epsg(4326)
assert explicit_crs_from_epsg(epsg="4326") == CRS.from_epsg(4326)
assert explicit_crs_from_epsg(crs={"init": "epsg:4326"}) == CRS.from_dict(
{"init": "epsg:4326"}
)
assert explicit_crs_from_epsg(crs="+init=epsg:4326") == CRS.from_proj4(
"+init=epsg:4326"
)
@pytest.mark.filterwarnings("ignore:explicit_crs_from_epsg:FutureWarning")
def test_explicit_crs_from_epsg__missing_input(self):
with pytest.raises(ValueError):
explicit_crs_from_epsg()
| bsd-3-clause |
mstrader/MkidDigitalReadout | DarknessFilters/makeNoiseSpectrum.py | 1 | 3961 | from matplotlib import rcParams, rc
import numpy as np
import sys
import scipy.interpolate
import scipy.signal
from baselineIIR import IirFilter
import matplotlib.pyplot as plt
def makeWienerNoiseSpectrum(data, peakIndices=[], numBefore=100, numAfter=700, noiseOffsetFromPeak=200, sampleRate=1e6, template=[],isVerbose=False,baselineSubtract=True):
nFftPoints = numBefore + numAfter
peakIndices=np.array(peakIndices).astype(int)
#If no peaks, choose random indices to make spectrum
if len(peakIndices)==0:
        print 'warning: makeWienerNoiseSpectrum was not passed any peakIndices. Generating random indices now'
peakIndices=np.array([0])
rate = len(data)/nFftPoints/10
while peakIndices[-1]<(len(data)-1):
prob=np.random.rand()
currentIndex=peakIndices[-1]
peakIndices=np.append(peakIndices,currentIndex+np.ceil(-np.log(prob)/rate).astype(int))
peakIndices=peakIndices[:-2]
if len(peakIndices)==0:
raise ValueError('makeWienerNoiseSpectrum: input data set is too short for the number of FFT points specified')
#Baseline subtract noise data
if(baselineSubtract):
noiseStream = np.array([])
for iPeak,peakIndex in enumerate(peakIndices):
if peakIndex > nFftPoints+noiseOffsetFromPeak and peakIndex < len(data)-numAfter:
noiseStream = np.append(noiseStream, data[peakIndex-nFftPoints-noiseOffsetFromPeak:peakIndex-noiseOffsetFromPeak])
data = data - np.mean(noiseStream)
#Calculate noise spectra for the defined area before each pulse
noiseSpectra = np.zeros((len(peakIndices), nFftPoints))
rejectInd=np.array([])
for iPeak,peakIndex in enumerate(peakIndices):
if peakIndex > nFftPoints+noiseOffsetFromPeak and peakIndex < len(data)-numAfter:
noiseData = data[peakIndex-nFftPoints-noiseOffsetFromPeak:peakIndex-noiseOffsetFromPeak]
noiseSpectra[iPeak] = np.abs(np.fft.fft(data[peakIndex-nFftPoints-noiseOffsetFromPeak:peakIndex-noiseOffsetFromPeak])/nFftPoints)**2
if len(template)!=0:
filteredData=np.correlate(noiseData,template,mode='same')
peakDict=tP.detectPulses(filteredData, nSigmaThreshold = 3., negDerivLenience = 1, bNegativePulses=False)
if len(peakDict['peakIndices'])!=0:
rejectInd=np.append(rejectInd,iPeak)
    #Remove indices with pulses by correlating with a template if provided
if len(template)!=0:
noiseSpectra = np.delete(noiseSpectra, rejectInd, axis=0)
noiseFreqs = np.fft.fftfreq(nFftPoints,1./sampleRate)
noiseSpectrum = np.median(noiseSpectra,axis=0)
#noiseSpectrum[0] = 2.*noiseSpectrum[1] #look into this later 8/15/16
if isVerbose:
print len(noiseSpectra[:,0]),'traces used to make noise spectrum', len(rejectInd), 'cut for pulse contamination'
return {'noiseSpectrum':noiseSpectrum, 'noiseFreqs':noiseFreqs}
def covFromData(data,size=800,nTrials=None):
nSamples = len(data)
if nTrials is None:
nTrials = nSamples//size
data = data[0:nTrials*size]
data = data.reshape((nTrials,size))
data = data.T
covMatrix = np.cov(data)
covMatrixInv = np.linalg.inv(covMatrix)
return {'covMatrix':covMatrix,'covMatrixInv':covMatrixInv}
def covFromPsd(powerSpectrum,size=None):
autocovariance = np.abs(np.fft.ifft(powerSpectrum))
if size is None:
size = len(autocovariance)
sampledAutocovariance = autocovariance[0:size]
shiftingRow = np.concatenate((sampledAutocovariance[:0:-1],sampledAutocovariance))
covMatrix = []
for iRow in range(size):
covMatrix.append(shiftingRow[size-iRow-1:size-iRow-1+size])
covMatrix = np.array(covMatrix)
covMatrixInv = np.linalg.inv(covMatrix)
return {'covMatrix':covMatrix,'covMatrixInv':covMatrixInv,'autocovariance':sampledAutocovariance}
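# Sketch of how these pieces fit together (illustrative call; `data` is a 1-D
# noise trace and `peaks` are previously detected pulse indices):
#
#   spectrum = makeWienerNoiseSpectrum(data, peakIndices=peaks, sampleRate=1e6)
#   cov = covFromPsd(spectrum['noiseSpectrum'], size=100)
#
# By the Wiener-Khinchin relation the autocovariance is the inverse FFT of the
# power spectrum, so cov['covMatrix'] is a symmetric Toeplitz matrix whose
# (i, j) entry depends only on the lag |i - j|.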
| gpl-2.0 |
cerebis/meta-sweeper | bin/truthtable.py | 1 | 21629 | """
meta-sweeper - for performing parametric sweeps of simulated
metagenomic sequencing experiments.
Copyright (C) 2016 "Matthew Z DeMaere"
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
import json
from collections import OrderedDict, Iterable, Counter
import numpy as np
import pandas as pd
import yaml
import io_utils
YAML_WIDTH = 1000
def order_rep(dumper, data):
"""
Dump OrderedDict like a regular dict. Will not reserialize ordered in this case.
:param dumper:
:param data:
:return: representer
"""
return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.items(), flow_style=False)
yaml.add_representer(OrderedDict, order_rep)
class AssignmentEncoder(json.JSONEncoder):
"""
Simple JSON Encoder which stores an Assignment as a dict
"""
def default(self, obj):
"""
Members to dictionary
:param obj: instance to convert
:return: dictionary of members
"""
return obj.__dict__
class Assignment:
"""
Represents the assignment of an object to 1 or many classes
"""
def __init__(self, mapping, weight=1):
self.mapping = mapping
self.weight = weight
def get_classes(self):
"""
:return: the list of classes
"""
return self.mapping.keys()
def get_primary_class(self):
"""
The class possessing the largest weight. Ties are broken by
random uniform selection.
:return: most significant (primary) class
"""
if len(self.mapping) == 1:
return self.mapping.keys()[0]
else:
_v = sorted(self.mapping.items(), key=lambda x: x[1], reverse=True)
_nv = np.array([vi[1] for vi in _v])
return _v[np.random.choice(np.where(_nv == _nv.max())[0])][0]
def mean_proportion(self):
"""
:return: the mean of assignment weights
"""
return np.mean(self.mapping.values())
def num_classes(self):
"""
:return: The number of classes to which the object is assigned
"""
return len(self.get_classes())
def __repr__(self):
return str(self)
def __str__(self):
return 'weight={0} mapping={1}'.format(self.weight, str(self.mapping))
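# Illustrative behaviour of Assignment (hypothetical weights):
#
#   a = Assignment({'A': 0.7, 'B': 0.3})
#   a.get_primary_class()   # -> 'A' (largest weight; ties broken at random)
#   a.num_classes()         # -> 2
#   a.mean_proportion()     # -> 0.5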
def assignment_rep(dumper, data):
"""
Dump Assignment objects as a mapping of member variables
:param dumper:
:param data:
:return: representer
"""
return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.__dict__, flow_style=True)
yaml.add_representer(Assignment, assignment_rep)
class TruthTable(object):
"""
Class which represents a truth table in the general sense.
This can be either a prediction or the ground truth.
The object stores object:class assignments along with a value
representing the support/strength of that assignment. The intention
is that this support can be used to convert multi-class assignments
to a single most significant assignment. It is up to the user
    to supply a useful support value.
"""
def __init__(self):
self.asgn_dict = {}
self.label_map = {}
self.label_count = Counter()
def __len__(self):
"""
Length of truth table is equal to the number of assignment classes.
:return: number of assignment classes
"""
return len(self.asgn_dict.keys())
def num_symbols(self):
return len(self.label_count)
def num_assignments(self):
return sum(self.label_count.values())
def num_objects(self):
return len(self.asgn_dict.keys())
def degeneracy(self, lengths=None):
"""
Calculate the degeneracy inherent in this truthtable. For non-overlapping assignments (clusterings)
        this will be 1. If there is overlap (degenerate assignment, i.e. soft clustering) then this number
will be >1. If a dictionary of object lengths (weights) is supplied, the measure is weighted
by the size of objects.
:param lengths: a dictionary of object lengths/weights
:return: a value >= 1.
"""
nobj = self.num_objects()
if nobj == 0:
return None
if lengths:
s = 0
l = 0
for k, v in self.asgn_dict.iteritems():
s += v.num_classes() * lengths[k]
l += lengths[k]
return s/float(l)
else:
return float(self.num_assignments()) / nobj
def invert(self):
cl = {}
for oi, clist in self.asgn_dict.iteritems():
for ci in clist.mapping:
if ci not in cl:
cl[ci] = set()
cl[ci].add(oi)
return cl
def mean_overlap(self, lengths=None):
cl = self.invert()
ckeys = cl.keys()
nkeys = len(ckeys)
if nkeys == 0:
return None
ovl = 0.0
if lengths:
for i in xrange(nkeys):
for j in xrange(i+1, nkeys):
int_cls = cl[ckeys[i]] & cl[ckeys[j]]
sint = 0
for ic in int_cls:
sint += lengths[ic]
uni_cls = cl[ckeys[i]] | cl[ckeys[j]]
sovl = 0
for ic in uni_cls:
sovl += lengths[ic]
ovl += sint / float(sovl)
return ovl / (2*nkeys)
else:
for i in xrange(nkeys):
for j in xrange(i+1, nkeys):
int_cls = len(cl[ckeys[i]] & cl[ckeys[j]])
uni_cls = len(cl[ckeys[i]] | cl[ckeys[j]])
ovl += int_cls / float(uni_cls)
return ovl / (2*nkeys)
def overlaps(self, lengths=None):
cl = self.invert()
ckeys = cl.keys()
nkeys = len(ckeys)
if nkeys == 0:
return None
print nkeys
ovl = np.zeros((nkeys, nkeys))
if lengths:
for i in xrange(nkeys):
for j in xrange(nkeys):
int_cls = cl[ckeys[i]] & cl[ckeys[j]]
sint = 0
for ic in int_cls:
sint += lengths[ic]
uni_cls = cl[ckeys[i]] | cl[ckeys[j]]
sovl = 0
for ic in uni_cls:
sovl += lengths[ic]
ovl[i, j] = sint / float(sovl)
else:
for i in xrange(nkeys):
for j in xrange(nkeys):
int_cls = len(cl[ckeys[i]] & cl[ckeys[j]])
uni_cls = len(cl[ckeys[i]] | cl[ckeys[j]])
ovl[i, j] = int_cls / float(uni_cls)
return pd.DataFrame(ovl, index=ckeys, columns=ckeys)
def print_tally(self, max_n=None):
n_symbol = self.num_symbols()
n_assignments = self.num_assignments()
n_objects = self.num_objects()
degen_ratio = 100.0 * self.degeneracy()
print '{0} symbols in table, {1:.0f} assignments of {2} objects ({3:.1f}% degeneracy)'.format(
n_symbol, n_assignments, n_objects, degen_ratio)
print 'ext_symb\tint_symb\tcount\tpercentage'
for n, ci in enumerate(sorted(self.label_count, key=self.label_count.get, reverse=True), start=1):
print '{0}\t{1}\t{2}\t{3:5.3f}'.format(ci,
self.label_map[ci],
self.label_count[ci],
self.label_count[ci] / float(n_assignments))
if n == max_n:
break
def refresh_counter(self):
self.label_count = Counter()
for k, v in self.asgn_dict.iteritems():
self.label_count.update(v.mapping)
def cluster_extents(self, obj_weights):
cl_map = self.invert()
extents = {}
for ci in cl_map:
extents[ci] = np.sum([obj_weights[oi] for oi in cl_map[ci]])
return extents
def cluster_N50(self, obj_weights):
cl_map = self.invert()
n50 = {}
for ci in cl_map:
desc_len = sorted([obj_weights[oi] for oi in cl_map[ci]], reverse=True)
sum_len = np.sum(desc_len)
sum_oi = 0
i = None
for i in xrange(len(desc_len)):
sum_oi += desc_len[i]
if sum_oi > 0.5*sum_len:
break
n50[ci] = desc_len[i]
return n50
def _remove_class(self, cl_id, cl_to_obj=None):
"""
Delete a class from the table.
:param cl_id: id of class to delete
:param cl_to_obj: class to object dict, if None then it is computed.
"""
if not cl_to_obj:
cl_to_obj = self.invert()
for oi in cl_to_obj[cl_id]:
# remove the class assignment from each object
del self.asgn_dict[oi].mapping[cl_id]
if len(self.asgn_dict[oi].mapping) == 0:
# delete the object if it is no longer assigned to any class
del self.asgn_dict[oi]
del self.label_map[cl_id]
def filter_extent(self, min_proportion, obj_weights):
"""
Remove classes which represent less than a threshold proportion
of the total extent of the objects in the table. Object weights/lengths
must be supplied as a dictionary. If an object becomes unassigned, it is removed.
:param min_proportion: threshold minimum extent of a class
:param obj_weights: dict of object weights/lengths
"""
print '##filter_started_with {0}'.format(len(self.label_count.keys()))
# make a inverted mapping, to build the deletion collection
cl_to_obj = self.invert()
sum_weight = float(sum(obj_weights.values()))
for ci in cl_to_obj:
cl_weight = sum(obj_weights[oi] for oi in cl_to_obj[ci])/sum_weight
if cl_weight < min_proportion:
self._remove_class(ci, cl_to_obj)
self.refresh_counter()
if len(self.label_count) == 0:
raise ValueError('Filtering resulted in an empty table')
print '##filter_finished_with {0}'.format(len(self.label_count.keys()))
def filter_class(self, min_proportion):
"""
Remove classes which represent less than a threshold proportion of all
objects in the table. This can be used to address problems of scale wrt
algorithm performance.
:param min_proportion least significant weight for a class assignment to pass
"""
print '##filter_started_with {0}'.format(len(self.label_count.keys()))
cl_to_obj = self.invert()
n_obj = float(sum(self.label_count.values()))
for ci, cl_size in self.label_count.iteritems():
if self.label_count[ci] / n_obj < min_proportion:
self._remove_class(ci, cl_to_obj)
self.refresh_counter()
if len(self.label_count) == 0:
raise ValueError('Filtering resulted in an empty table')
print '##filter_finished_with {0}'.format(len(self.label_count.keys()))
def get_weights(self):
_w = {}
for k, asgn in self.asgn_dict.iteritems():
_w[k] = asgn.weight
return _w
def soft(self, universal=False):
"""
Soft clustering result
:param universal: use universal symbols rather than labels supplied
:return plain dictionary with degenerate classification
"""
_s = OrderedDict()
_keys = sorted(self.asgn_dict.keys())
for k in _keys:
clz = self.asgn_dict[k].mapping.keys()
if universal:
# relabel with universal symbols if requested
clz = [self.label_map[ci] for ci in clz]
_s[k] = set(clz)
return _s
def hard(self, universal=False, use_set=False):
"""
Convert TT to a plain dictionary with the single most significant classification only.
        In the case of a tie, get_primary_class() breaks the tie by uniform
        random selection among the classes sharing the maximum weight.
:param universal: use universal symbols rather than labels supplied
:return plain dict with only one class->cluster mapping.
"""
_s = OrderedDict()
_keys = sorted(self.asgn_dict.keys())
for _k in _keys:
pc = self.asgn_dict[_k].get_primary_class()
if universal:
pc = self.label_map[pc]
_s[_k] = pc if not use_set else set([pc])
return _s
def get(self, key):
return self.asgn_dict.get(key)
def put(self, key, value, weight=None):
self.asgn_dict[key] = Assignment(value)
if weight:
self.asgn_dict[key].weight = weight
def update_from_serialized(self, yd):
"""
Initialise a TruthTable from a generic dict of dicts object most likely
retrieved from a serialized object.
We chose to avoid a custom class here for inter-codebase
portability.
:param yd: generic yaml object, dict of dicts
:return:
"""
for k, v in yd.iteritems():
self.asgn_dict[k] = Assignment(v['mapping'], v['weight'])
self.label_count.update(v['mapping'].keys())
labels = sorted(self.label_count.keys())
self.label_map = dict((l, n) for n, l in enumerate(labels, 1))
def update(self, dt, weights=None, min_score=0):
"""
Initialise the assignment dictionary and also generate a mapping of
class symbol to the positive integers. We can use this as a universal
symbol basis.
:param dt: the dictionary to initialise from
:param weights: new weights for assignments
:param min_score: minimum score to consider
"""
all_asgn = 0
filt_obj = 0
filt_asgn = 0
all_obj = len(dt)
for k, v in dt.iteritems():
v_filt = dict((kv, vv) for kv, vv in v.iteritems() if int(vv) >= min_score)
filt_asgn += len(v) - len(v_filt)
all_asgn += len(v)
if len(v_filt) == 0:
filt_obj += 1
continue
self.asgn_dict[str(k)] = Assignment(v_filt)
if weights:
self.asgn_dict[str(k)].weight = weights[k]
self.label_count.update(v_filt.keys())
if filt_asgn > 0:
print 'Filtered {0}/{1} assignments and {2}/{3} objects below minimum score {4}'.format(
filt_asgn, all_asgn, filt_obj, all_obj, min_score)
labels = sorted(self.label_count.keys())
self.label_map = dict((l, n) for n, l in enumerate(labels, 1))
def to_vector(self):
vec = {}
keys = sorted(self.asgn_dict.keys())
hd = self.hard()
for k in keys:
vec[k] = hd[k]
return vec
def write(self, pathname, fmt='json'):
"""
        Write the full table in either JSON or YAML format
:param pathname: the output path
:param fmt: json or yaml
"""
TruthTable._write_dict(self.asgn_dict, pathname, fmt=fmt, encoder=AssignmentEncoder)
def write_hard(self, pathname, fmt='json'):
"""
Write a plain dictionary representation of only the most significant
object:class assignments.
:param pathname: the output path
:param fmt: json, yaml or delim
"""
TruthTable._write_dict(self.hard(), pathname, fmt=fmt)
@staticmethod
def _write_dict(d, pathname, fmt='json', sep='\t', encoder=None):
"""
Serialize a plain dict to file
:param d: dict to serialize
:param pathname: output path
:param fmt: json, yaml or delim
:param sep: delimited format separator
"""
with open(pathname, 'w') as h_out:
if fmt == 'json':
json.dump(d, h_out, cls=encoder, indent=1)
elif fmt == 'yaml':
yaml.dump(d, h_out, default_flow_style=False, width=YAML_WIDTH)
elif fmt == 'delim':
for qry, sbjs in d.iteritems():
line = [str(qry)] + [str(si) for si in sorted(sbjs)]
h_out.write('{0}\n'.format(sep.join(line)))
else:
raise RuntimeError('Unknown format requested [{0}]'.format(fmt))
def read_truth(pathname, fmt='json'):
"""
Read a TruthTable in YAML format
:param pathname: path to truth table
:param fmt: json or yaml
:return: truth table
"""
with open(pathname, 'r') as h_in:
tt = TruthTable()
if fmt == 'json':
d = io_utils.json_load_byteified(h_in)
elif fmt == 'yaml':
d = yaml.load(h_in)
else:
            raise RuntimeError('Unsupported format requested [{0}]'.format(fmt))
tt.update_from_serialized(d)
return tt
def read_mcl(pathname):
"""
Read a MCL solution file converting this to a TruthTable
:param pathname: mcl output file
:return: truth table
"""
with open(pathname, 'r') as h_in:
# read the MCL file, which lists all members of a class on a single line
# the class ids are implicit, therefore we use line number.
mcl = {}
for ci, line in enumerate(h_in, start=1):
objects = line.rstrip().split()
for oi in objects:
if oi not in mcl:
mcl[oi] = {}
mcl[oi][ci] = 1.0 # there are no weights, therefore give them all 1
# initialise the table
tt = TruthTable()
tt.update(mcl)
return tt
def unique_labels(dt):
"""
Extract the unique set of class labels used in the truth table
:param dt: dictionary representation of truth table to analyze
:return: sorted set of unique labels
"""
labels = set()
for v in dt.values():
        if isinstance(v, Iterable) and not isinstance(v, basestring):
labels.update(v)
else:
labels.add(v)
return sorted(labels)
def crosstab(dt1, dt2):
"""
Cross-tabulate two truth tables on hard clusters.
:param dt1: first dictionary rep of truth table
:param dt2: second dictionary rep of truth table
:return: pandas dataframe
"""
joined_keys = sorted(set(dt1.keys() + dt2.keys()))
rows = unique_labels(dt1)
cols = unique_labels(dt2)
ctab = pd.DataFrame(0, index=rows, columns=cols)
for k in joined_keys:
if k in dt1 and k in dt2:
i1 = dt1[k]
i2 = dt2[k]
ctab.loc[i1, i2] += 1
return ctab
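# Example with hypothetical hard assignments:
#
#   crosstab({'o1': 'A', 'o2': 'A', 'o3': 'B'},
#            {'o1': 'A', 'o2': 'B', 'o3': 'B'})
#
# returns a 2x2 DataFrame with rows ['A', 'B'] (first table) and columns
# ['A', 'B'] (second table), counting co-assignments: A/A=1, A/B=1, B/B=1.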
def simulate_error(tt, p_mut, p_indel, extra_symb=()):
"""
Simple method for introducing error in a truth table. This is useful when
testing clustering metrics (Fm, Vm, Bcubed, etc). By default, the list of possible
symbols is taken from those already assigned, but a user may provide additional
symbols. These can provide a useful source of novelty, when for instance
an object is already assigned to all existing class symbols.
:param tt: the truth table to add error
:param p_mut: the probably of a class mutation
:param p_indel: the probability of deletion or insertion of a class to an object
:param extra_symb: extra class symbols for inserting
:return: truth table mutatant
"""
    symbols = list(set(tt.label_count.keys()) | set(extra_symb))
print symbols
mut_dict = copy.deepcopy(tt.asgn_dict)
for o_i in mut_dict.keys():
others = list(set(symbols) - set(mut_dict[o_i].mapping))
if np.random.uniform() < p_mut:
if len(others) > 0:
c_mut = np.random.choice(others, 1)[0]
c_old = np.random.choice(mut_dict[o_i].mapping.keys(), 1)[0]
# retain the weighting from the original to mutated
# we do this pedantically so its easy to read
weight = mut_dict[o_i].mapping[c_old]
mut_dict[o_i].mapping[c_mut] = weight
del mut_dict[o_i].mapping[c_old]
if np.random.uniform() < p_indel:
# flip a coin, delete or insert
if np.random.uniform() < 0.5:
# delete
c_del = np.random.choice(mut_dict[o_i].mapping.keys(), 1)[0]
del mut_dict[o_i].mapping[c_del]
elif len(others) > 0:
# insert from 'others'
c_add = np.random.choice(others, 1)[0]
num_cl = mut_dict[o_i].num_classes()
adj_fac = float(num_cl / (num_cl+1.))
ins_prop = mut_dict[o_i].mean_proportion() * adj_fac
for k in mut_dict[o_i].mapping:
mut_dict[o_i].mapping[k] *= adj_fac
mut_dict[o_i].mapping[c_add] = ins_prop
mut_tt = TruthTable()
mut_tt.update_from_serialized(mut_dict)
return mut_tt
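# Usage sketch with hypothetical assignments (uses only the classes above):
#
#   tt = TruthTable()
#   tt.update({'ctg1': {'A': 1.0}, 'ctg2': {'B': 1.0}, 'ctg3': {'A': 0.6, 'B': 0.4}})
#   print tt.hard()['ctg3']   # -> 'A' (highest-weight class wins)
#   noisy = simulate_error(tt, p_mut=0.1, p_indel=0.05, extra_symb=['C'])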
| gpl-3.0 |
abhishek8gupta/sp17-i524 | project/S17-IO-3012/code/bin/benchmark_version_import.py | 19 | 4590 | import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
"""retrieves mandatory parameter to program
@param: none
@type: n/a
"""
try:
return sys.argv[1]
except:
print ('Must enter file name as parameter')
exit()
def read_file(filename):
"""reads a file into a pandas dataframe
@param: filename The name of the file to read
@type: string
"""
try:
return pd.read_csv(filename)
except:
print ('Error retrieving file')
exit()
def select_data(benchmark_df, mongo_version, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
benchmark_df = benchmark_df[benchmark_df.cloud == "chameleon"]
benchmark_df = benchmark_df[benchmark_df.test_size == "large"]
if mongo_version != 'X':
benchmark_df = benchmark_df[benchmark_df.mongo_version == mongo_version]
if config_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
if mongos_instances != 'X':
benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
if shard_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
if shards_per_replica != 'X':
benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]
#benchmark_df1 = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica']).mean()
#http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
benchmark_df = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()
#http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
#print benchmark_df1['shard_replicas']
#print benchmark_df1
#print benchmark_df
benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)
return benchmark_df
def make_figure(import_seconds_32, shards_32, import_seconds_34, shards_34):
"""formats and creates a line chart
@param1: find_seconds_kilo Array with find_seconds from kilo
@type: numpy array
@param2: shards_kilo Array with shards from kilo
@type: numpy array
@param3: find_seconds_chameleon Array with find_seconds from chameleon
@type: numpy array
@param4: shards_chameleon Array with shards from chameleon
@type: numpy array
"""
fig = plt.figure()
#plt.title('Average MongoImport Runtime with Various Numbers of Shards')
plt.ylabel('Runtime in Seconds')
plt.xlabel('Number of Shards')
# Make the chart
plt.plot(shards_32, import_seconds_32, label='Version 3.2')
plt.plot(shards_34, import_seconds_34, label='Version 3.4')
#http://stackoverflow.com/questions/11744990/how-to-set-auto-for-upper-limit-but-keep-a-fixed-lower-limit-with-matplotlib
plt.ylim(ymin=0)
plt.legend(loc='best')
# Show the chart (for testing)
# plt.show()
# Save the chart
fig.savefig('../report/version_import.png')
# Run the program by calling the functions
if __name__ == "__main__":
filename = get_parm()
benchmark_df = read_file(filename)
mongo_version = 32
config_replicas = 1
mongos_instances = 1
shard_replicas = 'X'
shards_per_replica = 1
select_df = select_data(benchmark_df, mongo_version, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
#http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
#percentage death=\
import_seconds_32=select_df.as_matrix(columns=[select_df.columns[6]])
shards_32 = select_df.as_matrix(columns=[select_df.columns[3]])
#http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
mongo_version = 34
config_replicas = 1
mongos_instances = 1
shard_replicas = 'X'
shards_per_replica = 1
select_df = select_data(benchmark_df, mongo_version, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
#http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
#percentage death=\
import_seconds_34=select_df.as_matrix(columns=[select_df.columns[6]])
shards_34 = select_df.as_matrix(columns=[select_df.columns[3]])
#http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
make_figure(import_seconds_32, shards_32, import_seconds_34, shards_34)
| apache-2.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/decomposition/plot_pca_vs_fa_model_selection.py | 1 | 5331 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is the different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
print(__doc__)
# #############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
# #############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
# plt.show()
pltshow(plt)
| mit |
airanmehr/bio | Scripts/Miscellaneous/Tutorials/demography.py | 1 | 2014 | '''
Copyleft Mar 10, 2017 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: airanmehr@gmail.com
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
# import seaborn as sns
import pylab as plt;
import matplotlib as mpl
import os;
import simuPOP as sim
from simuPOP.demography import *
model = MultiStageModel([
InstantChangeModel(T=200,
# start with an ancestral population of size 1000
N0=(1000, 'Ancestral'),
# change population size at 50 and 60
G=[50, 60],
# change to population size 200 and back to 1000
NG=[(200, 'bottleneck'), (1000, 'Post-Bottleneck')]),
ExponentialGrowthModel(
T=50,
# split the population into two subpopulations
N0=[(400, 'P1'), (600, 'P2')],
# expand to size 4000 and 5000 respectively
NT=[4000, 5000])]
)
def exp(T=10):return ExponentialGrowthModel(T=T, N0=1000, NT=200)
def lin(T=10):return LinearGrowthModel(T=T, N0=200, NT=1000)
model=MultiStageModel([exp(),lin(),exp(),lin(),exp(),lin(),exp(),lin(),exp(),lin()])
# model.init_size returns the initial population size
# migrate_to is required for migration
model=exp(50)
#model=lin(50)
#model=MultiStageModel([exp(50),lin(50)])
pop = sim.Population(size=model.init_size, loci=1,
infoFields=model.info_fields)
pop.evolve(
initOps=[
sim.InitSex(),
sim.InitGenotype(freq=[0.5, 0.5])
],
matingScheme=sim.RandomMating(subPopSize=model),
finalOps=
sim.Stat(alleleFreq=0, vars=['alleleFreq_sp']),
gen=model.num_gens
)
model
# print out population size and frequency
#for idx, name in enumerate(pop.subPopNames()):
#print('%s (%d): %.4f' % (name, pop.subPopSize(name), pop.dvars(idx).alleleFreq[0][0]))
# get a visual presentation of the demographic model
import matplotlib
model.plot('/home/arya/bottleneck.png',title='bottleneck') | mit |
thilbern/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of a several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
JonWel/CoolProp | Web/conf.py | 3 | 11060 |
# -*- coding: utf-8 -*-
#
# sampledoc documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 11 05:04:40 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import CoolProp
ver = CoolProp.__version__
# The short X.Y version.
version = ver.rsplit('.',1)[0]
# The full version, including alpha/beta/rc tags.
release = ver
# Some hacking to determine release or nightly build
if ver[-2]=="." and ver[-1].isdigit()>=0 and ver[-3].isdigit()>=0:
isRelease = True
else:
isRelease = False
print("------------ Project information ------------")
print("Detected version: "+version)
print("Detected release: "+release)
print("Public release : "+ ("True" if isRelease else "False") )
print(" ")
if isRelease:
extlinks = {'sfdownloads': ('http://sourceforge.net/projects/coolprop/files/CoolProp/'+release+'/%s',''),
'sfnightly' : ('http://sourceforge.net/projects/coolprop/files/CoolProp/nightly/%s',''),
#'bbbinaries' : ('http://www.coolprop.dreamhosters.com:8010/binaries/%s',''),
#'bbsphinx' : ('http://www.coolprop.dreamhosters.com:8010/sphinx/%s','')
}
else:
extlinks = {'sfdownloads': ('http://sourceforge.net/projects/coolprop/files/CoolProp/'+release+'/%s',''),
'sfnightly' : ('http://www.coolprop.dreamhosters.com/binaries/%s',''),
#'bbbinaries' : ('http://www.coolprop.dreamhosters.com:8010/binaries/%s',''),
#'bbsphinx' : ('http://www.coolprop.dreamhosters.com:8010/sphinx/%s','')
}
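# With these extlinks roles the .rst sources can write, for example,
# :sfnightly:`CoolProp.dll` (file name shown for illustration only) instead of
# spelling out the full download URL; Sphinx substitutes the %s placeholder.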
import sys, os, datetime
#~ # If your extensions are in another directory, add it here. If the directory
#~ # is relative to the documentation root, use os.path.abspath to make it
#~ # absolute, like shown here.
#~ sys.path.append(os.path.abspath('sphinxext'))
sys.path.insert(0, os.path.abspath('_ext'))
try:
import sphinxcontrib.doxylink
except ImportError:
print('Unable to import sphinxcontrib.doxylink; try to run "pip install sphinxcontrib-doxylink"')
if isRelease:
doxylink = {
'cpapi' : ('_static/doxygen/CoolPropDoxyLink.tag', 'http://www.coolprop.org/_static/doxygen/html')
}
else:
doxylink = {
'cpapi' : ('_static/doxygen/CoolPropDoxyLink.tag', 'http://www.coolprop.dreamhosters.com/binaries/sphinx/_static/doxygen/html')
}
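# The doxylink role defined above lets the .rst sources link to C++ symbols in
# the Doxygen tag file, e.g. :cpapi:`CoolProp::AbstractState` (symbol shown
# for illustration), resolving against the generated Doxygen HTML.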
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.extlinks',
'sphinxcontrib.bibtex',
'sphinxcontrib.napoleon',
'sphinxcontrib.doxylink',
'matplotlib.sphinxext.plot_directive',
'edit_on_github', # see https://gist.github.com/mgedmin/6052926#file-edit_on_github-pyb
# cloud's extensions
#'cloud_sptheme.ext.autodoc_sections',
'cloud_sptheme.ext.index_styling',
'cloud_sptheme.ext.relbar_toc',
#'cloud_sptheme.ext.escaped_samp_literals',
'cloud_sptheme.ext.issue_tracker',
#'cloud_sptheme.ext.table_styling',
#'inheritance_diagram',
#'numpydoc',
#'breathe'
]
# set path to issue tracker:
issue_tracker_url = "gh:CoolProp/CoolProp"
plot_formats = [('png',80),('.pdf')]
index_doc = "index"
numpydoc_show_class_members = False
mathjax_path = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
d = datetime.datetime.today()
project = u'CoolProp'
copyright = u'2010-{0}, Ian H. Bell and the CoolProp Team'.format(d.year)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build','sphinxext']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
#This value selects what content will be inserted into the main body of an autoclass directive.
#'class' - Only the class’ docstring is inserted. This is the default.
#'init' - Only the __init__ method’s docstring is inserted.
#'both' - Both the class’ and the __init__ method’s docstring are concatenated and inserted
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------------
try:
import cloud_sptheme as csp
except:
print('unable to import cloud_sptheme as csp; try a "pip install cloud_sptheme"')
# import Cloud
import cloud_sptheme as csp
# ... some contents omitted ...
# set the html theme
html_theme = "cloud"
# NOTE: there is also a red-colored version named "redcloud"
# ... some contents omitted ...
# set the theme path to point to cloud's theme data
html_theme_path = [csp.get_theme_dir()]
# [optional] set some of the options listed above...
html_theme_options = { "roottarget": "index",
"max_width" : "13in",
"logotarget": "index",
"googleanalytics_id": "UA-53205480-2",
"default_layout_text_size": "85%"
}
edit_on_github_project = 'CoolProp/CoolProp'
edit_on_github_branch = 'master'
edit_on_github_path_prefix = 'Web'
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
## html_theme = 'sphinxdoc'
## html_theme='nature'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'CoolProp2.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/CoolPropLogo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = 'index.html'
# Custom sidebar templates, maps page names to templates.
# html_sidebars = {'index': 'indexsidebar.html',
# }
html_sidebars = {
'**': ['globaltoc.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'sampledocdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'CoolPropdoc.tex', u'CoolProp Documentation',
u'Ian Bell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| mit |
mrlb05/Nifty4Gemini | nifty/pipeline/steps/routines/nifsMakeTelluric.py | 4 | 13051 | import sys, glob, shutil, getopt, os, time, logging, sgmllib, urllib, re, traceback, pkg_resources
import pexpect as p
from pyraf import iraf, iraffunctions
import astropy.io.fits
from astropy.io.fits import getdata, getheader
import numpy as np
from scipy.interpolate import interp1d
from scipy import arange, array, exp
from scipy.ndimage.interpolation import shift
import pylab as pl
import matplotlib.pyplot as plt
# LOCAL
# Import config parsing.
from ..configobj.configobj import ConfigObj
# Import custom Nifty functions.
from ..nifsUtils import datefmt, listit, writeList, checkLists, makeSkyList, MEFarith, convertRAdec
# Import Nifty python data cube merging script.
from .nifsMerge import mergeCubes
# Define constants
# Paths to Nifty data.
RECIPES_PATH = pkg_resources.resource_filename('nifty', 'recipes/')
RUNTIME_DATA_PATH = pkg_resources.resource_filename('nifty', 'runtimeData/')
def makeTelluricCorrection(
telluricDirectory, path, continuuminter, hlineinter, tempInter, hline_method="vega", spectemp="",
mag="", log="test.log", over=False):
"""FLUX CALIBRATION
Consists of this start function and six required functions at the end of
this file.
"""
"""iraf.gemini(_doprint=0, motd="no")
iraf.gnirs(_doprint=0)
iraf.imutil(_doprint=0)
iraf.onedspec(_doprint=0)
iraf.nsheaders('nifs',Stdout='/dev/null')"""
# Overview of Telluric Correction procedure:
# We make a telluric correction by:
# Remove H-lines from combined 1D standard star spectrum.
# Divide by H-line corrected standard spectrum by continuum fit.
# We apply a telluric correction by:
# Dividing the cube by the correction spectrum (with iraf.telluric) to figure out the shift and scaling.
# Dividing again by the continuum to add a continuum shape back in.
# Telluric correction done.
# Overview of flux calibration procedure:
# Make a blackbody spectrum.
# Scale to the observed magnitude of the standard.
# Multiply telluric corrected target spectrum by this scaled blackbody.
# Done!
iraffunctions.chdir(telluricDirectory)
    logging.info('I am starting to create telluric correction spectrum and blackbody spectrum')
# Open the combine extracted 1d spectrum.
try:
combined_extracted_1d_spectra = str(open('telluricfile', 'r').readlines()[0]).strip()
except:
logging.info("No telluricfile found in " + str(telluricDirectory) + "Skipping telluric correction and flux calibration.")
return
if not os.path.exists('scienceMatchedTellsList'):
logging.info("No scienceMatchedTellsList found in " + str(telluricDirectory))
return
telheader = astropy.io.fits.open(combined_extracted_1d_spectra+'.fits')
grating = telheader[0].header['GRATING'][0]
# Get standard star spectral type, teff, and magnitude from the interwebs. Go forth, brave parser!
getStandardInfo(path, mag, grating, spectemp)
hLineCorrection(combined_extracted_1d_spectra, grating, path, hlineinter, tempInter, hline_method, log, over)
# Fit a continuum from the standard star spectrum, saving both continuum and continuum divided standard spectrum.
fitContinuum(continuuminter, tempInter, grating)
# Divide the standard star spectrum by the continuum to normalize it.
if os.path.exists("telluricCorrection.fits"):
os.remove("telluricCorrection.fits")
iraf.imarith('final_tel_no_hlines_no_norm', "/", 'fit', result='telluricCorrection',title='',divzero=0.0,hparams='',pixtype='',calctype='',verbose='no',noact='no',mode='al')
# Done deriving telluric correction! We have two new products:
# 1) A continuum-normalized telluric correction spectrum, telluricCorrection.fits, and
# 2) The continuum we used to normalize it, fit.fits.
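# Editorial sketch (hedged, not part of the original pipeline): the overview
# comments above describe applying the two products to a science spectrum by
# dividing with telluricCorrection.fits (iraf.telluric finds shift and scale)
# and then dividing by fit.fits to restore the continuum shape. A minimal
# sketch, assuming a hypothetical 1D science spectrum "science1d.fits" in the
# working directory:
def applyTelluricCorrectionSketch(science1d="science1d"):
    """Illustration only; file names and parameter values are assumptions."""
    iraf.telluric(input=science1d + "[1]", output="science_telcor",
                  cal="telluricCorrection.fits", xcorr='yes', tweakrms='yes',
                  inter='no', threshold=0.1, lag=3, shift=0., dshift=0.05,
                  scale=1.0, dscale=0.05, offset=0., smooth=1, cursor='',
                  mode='al', Stdout=1)
    iraf.imarith(operand1="science_telcor", op="/", operand2="fit",
                 result="science_telcor_continuum", title='', divzero=0.0,
                 hparams='', pixtype='', calctype='', verbose='no',
                 noact='no', mode='al')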
def hLineCorrection(combined_extracted_1d_spectra, grating, path, hlineinter, tempInter, hline_method, log, over, airmass_std=1.0):
"""
Remove hydrogen lines from the spectrum of a telluric standard,
using a model of vega's atmosphere.
"""
# File for recording shift/scale from calls to "telluric"
telluric_shift_scale_record = open('telluric_hlines.txt', 'w')
# Remove H lines from standard star correction spectrum
no_hline = False
if os.path.exists("final_tel_no_hlines_no_norm.fits"):
if over:
iraf.delete("final_tel_no_hlines_no_norm.fits")
else:
no_hline = True
logging.info("Output file exists and -over- not set - skipping H line removal")
if hline_method == "vega" and not no_hline:
vega(combined_extracted_1d_spectra, grating, path, hlineinter, telluric_shift_scale_record, log, over)
#if hline_method == "linefitAuto" and not no_hline:
# linefitAuto(combined_extracted_1d_spectra, grating)
# Disabled and untested because interactive scripted iraf tasks are broken...
#if hline_method == "linefitManual" and not no_hline:
# linefitManual(combined_extracted_1d_spectra+'[sci,1]', grating)
#if hline_method == "vega_tweak" and not no_hline:
#run vega removal automatically first, then give user chance to interact with spectrum as well
# vega(combined_extracted_1d_spectra,grating, path, hlineinter, telluric_shift_scale_record, log, over)
# linefitManual("final_tel_no_hlines_no_norm", grating)
#if hline_method == "linefit_tweak" and not no_hline:
#run Lorentz removal automatically first, then give user chance to interact with spectrum as well
# linefitAuto(combined_extracted_1d_spectra,grating)
# linefitManual("final_tel_no_hlines_no_norm", grating)
if hline_method == "none" and not no_hline:
#need to copy files so have right names for later use
iraf.imcopy(input=combined_extracted_1d_spectra+'[sci,'+str(1)+']', output="final_tel_no_hlines_no_norm", verbose='no')
# Plot the non-hline corrected spectrum and the h-line corrected spectrum.
uncorrected = astropy.io.fits.open(combined_extracted_1d_spectra+'.fits')[1].data
corrected = astropy.io.fits.open("final_tel_no_hlines_no_norm.fits")[0].data
if hlineinter or tempInter:
plt.title('Before and After HLine Correction')
plt.plot(uncorrected)
plt.plot(corrected)
plt.show()
def vega(spectrum, band, path, hlineinter, telluric_shift_scale_record, log, over, airmass=1.0):
"""
Use iraf.telluric to remove H lines from standard star, then remove
normalization added by telluric with iraf.imarith.
The extension for vega_ext.fits is specified from band (from header of
telluricfile.fits).
Args:
spectrum (string): filename from 'telluricfile'.
band: from telluricfile .fits header. Eg 'K', 'H', 'J'.
path: usually top directory with Nifty scripts.
hlineinter (boolean): Interactive H line fitting. Specified with -i at
command line. Default False.
airmass: from telluricfile .fits header.
telluric_shift_scale_record: "pointer" to telluric_hlines.txt.
log: path to logfile.
over (boolean): overwrite old files. Specified at command line.
"""
if band=='K':
ext = '1'
sample = "21537:21778"
scale = 0.8
if band=='H':
ext = '2'
sample = "16537:17259"
scale = 0.7
if band=='J':
ext = '3'
sample = "11508:13492"
scale = 0.885
if band=='Z':
ext = '4'
sample = "*"
scale = 0.8
if os.path.exists("tell_nolines.fits"):
if over:
os.remove("tell_nolines.fits")
tell_info = iraf.telluric(input=spectrum+"[1]", output='tell_nolines', cal= RUNTIME_DATA_PATH+'vega_ext.fits['+ext+']', xcorr='yes', tweakrms='yes', airmass=airmass, inter=hlineinter, sample=sample, threshold=0.1, lag=3, shift=0., dshift=0.05, scale=scale, dscale=0.05, offset=0., smooth=1, cursor='', mode='al', Stdout=1)
else:
logging.info("Output file exists and -over not set - skipping H line correction")
else:
tell_info = iraf.telluric(input=spectrum+"[1]", output='tell_nolines', cal= RUNTIME_DATA_PATH+'vega_ext.fits['+ext+']', xcorr='yes', tweakrms='yes', inter=hlineinter, airmass=airmass, sample=sample, threshold=0.1, lag=3, shift=0., dshift=0.05, scale=scale, dscale=0.05, offset=0., smooth=1, cursor='', mode='al', Stdout=1)
    # Check which line of the iraf.telluric output contains the normalization factor (the formatting differs when it warns about pixels outside the calibration limits)
if "limits" in tell_info[-1].split()[-1]:
norm=tell_info[-2].split()[-1]
else:
norm=tell_info[-1].split()[-1]
if os.path.exists("final_tel_no_hlines_no_norm.fits"):
if over:
os.remove("final_tel_no_hlines_no_norm.fits")
iraf.imarith(operand1='tell_nolines', op='/', operand2=norm, result='final_tel_no_hlines_no_norm', title='', divzero=0.0, hparams='', pixtype='', calctype='', verbose='yes', noact='no', mode='al')
else:
logging.info("Output file exists and -over not set - skipping H line normalization")
else:
iraf.imarith(operand1='tell_nolines', op='/', operand2=norm, result='final_tel_no_hlines_no_norm', title='', divzero=0.0, hparams='', pixtype='', calctype='', verbose='yes', noact='no', mode='al')
# TODO(nat): linefitAuto and linefitManual could be useful at some point.
def linefitAuto(spectrum, band):
"""automatically fit Lorentz profiles to lines defined in existing cur* files
Go to x position in cursor file and use space bar to find spectrum at each of those points
"""
specpos = iraf.bplot(images=spectrum+'[SCI,1]', cursor='cur'+band, Stdout=1, StdoutG='/dev/null')
specpose = str(specpos).split("'x,y,z(x):")
nextcur = 'nextcur'+band+'.txt'
# Write line x,y info to file containing Lorentz fitting commands for bplot
write_line_positions(nextcur, specpos)
iraf.delete('final_tel_no_hlines_no_norm.fits',ver="no",go_ahead='yes',Stderr='/dev/null')
# Fit and subtract Lorentz profiles. Might as well write output to file.
iraf.bplot(images=spectrum+'[sci,1]',cursor='nextcur'+band+'.txt', new_image='final_tel_no_hlines_no_norm', overwrite="yes",StdoutG='/dev/null',Stdout='Lorentz'+band)
def linefitManual(spectrum, band):
""" Enter splot so the user can fit and subtract lorents (or, actually, any) profiles
"""
iraf.splot(images=spectrum, new_image='final_tel_no_hlines_no_norm', save_file='../PRODUCTS/lorentz_hlines.txt', overwrite='yes')
# it's easy to forget to use the 'i' key to actually write out the line-free spectrum, so check that it exists:
# with the 'tweak' options, the line-free spectrum will already exists, so this lets the user simply 'q' and move on w/o editing (too bad if they edit and forget to hit 'i'...)
while True:
try:
with open("final_tel_no_hlines_no_norm.fits") as f: pass
break
except IOError as e:
logging.info("It looks as if you didn't use the i key to write out the lineless spectrum. We'll have to try again. --> Re-entering splot")
iraf.splot(images=spectrum, new_image='final_tel_no_hlines_no_norm', save_file='../PRODUCTS/lorentz_hlines.txt', overwrite='yes')
def fitContinuum(continuuminter, tempInter, grating):
"""
Fit a continuum to the telluric correction spectrum to normalize it. The continuum
fitting regions were derived by eye and can be improved.
    Results are written to fit.fits.
"""
# These were found to fit the curves well by hand. You can probably improve them; feel free to fiddle around!
if grating == "K":
order = 5
sample = "20279:20395,20953:24283"
elif grating == "J":
order = 5
sample = "11561:12627,12745:12792,12893:13566"
elif grating == "H":
order = 5
sample = "*"
elif grating == "Z":
order = 5
sample = "9453:10015,10106:10893,10993:11553"
if os.path.exists("fit.fits"):
os.remove("fit.fits")
iraf.continuum(input='final_tel_no_hlines_no_norm',output='fit',ask='yes',lines='*',bands='1',type="fit",replace='no',wavescale='yes',logscale='no',override='no',listonly='no',logfiles='',inter=continuuminter,sample=sample,naverage=1,func='spline3',order=order,low_rej=1.0,high_rej=3.0,niterate=2,grow=1.0,markrej='yes',graphics='stdgraph',cursor='',mode='ql')
# Plot the telluric correction spectrum with the continuum fit.
final_tel_no_hlines_no_norm = astropy.io.fits.open('final_tel_no_hlines_no_norm.fits')[0].data
fit = astropy.io.fits.open('fit.fits')[0].data
if continuuminter or tempInter:
plt.title('Unnormalized Telluric Correction and Continuum fit Used to Normalize')
plt.plot(final_tel_no_hlines_no_norm)
plt.plot(fit)
plt.show()
def divideByContinuum(inputSpectra, divisor, )
| mit |
RayMick/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
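# Hedged usage sketch (editorial addition, not part of the package): a typical
# shrinkage-covariance fit on data X of shape (n_samples, n_features) looks
# roughly like
#
#     import numpy as np
#     from sklearn.covariance import LedoitWolf
#     X = np.random.RandomState(0).randn(200, 5)
#     lw = LedoitWolf().fit(X)
#     lw.covariance_, lw.shrinkage_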
| bsd-3-clause |
HolgerPeters/scikit-learn | doc/conf.py | 22 | 9789 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2017, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'reference_url': {
'sklearn': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
'nibabel': 'http://nipy.org/nibabel'}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# value: maximum width (in pixels) used to resize that image into its carousel thumbnail
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
alberto-antonietti/nest-simulator | pynest/examples/structural_plasticity.py | 6 | 13978 | # -*- coding: utf-8 -*-
#
# structural_plasticity.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Structural Plasticity example
----------------------------------
This example shows a simple network of two populations where structural
plasticity is used. The network has 1000 neurons, 80% excitatory and
20% inhibitory. The simulation starts without any connectivity. A set of
homeostatic rules are defined, according to which structural plasticity will
create and delete synapses dynamically during the simulation until a desired
level of electrical activity is reached. The model of structural plasticity
used here corresponds to the formulation presented in [1]_.
At the end of the simulation, a plot of the evolution of the connectivity
in the network and the average calcium concentration in the neurons is created.
References
~~~~~~~~~~~
.. [1] Butz, M., and van Ooyen, A. (2013). A simple rule for dendritic spine and axonal bouton formation can
account for cortical reorganization after focal retinal lesions. PLoS Comput. Biol. 9 (10), e1003259.
"""
####################################################################################
# First, we import all necessary modules.
import nest
import numpy
import matplotlib.pyplot as plt
import sys
####################################################################################
# We define general simulation parameters
class StructuralPlasticityExample:
def __init__(self):
# simulated time (ms)
self.t_sim = 200000.0
# simulation step (ms).
self.dt = 0.1
self.number_excitatory_neurons = 800
self.number_inhibitory_neurons = 200
# Structural_plasticity properties
self.update_interval = 10000.0
self.record_interval = 1000.0
# rate of background Poisson input
self.bg_rate = 10000.0
self.neuron_model = 'iaf_psc_exp'
####################################################################################
# In this implementation of structural plasticity, neurons grow
# connection points called synaptic elements. Synapses can be created
# between compatible synaptic elements. The growth of these elements is
# guided by homeostatic rules, defined as growth curves.
# Here we specify the growth curves for synaptic elements of excitatory
# and inhibitory neurons.
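        # Editorial note (hedged): with the 'gaussian' growth curve the number
        # of synaptic elements z is assumed to evolve roughly as
        #     dz/dt = growth_rate * (2 * exp(-((Ca - xi) / zeta)**2) - 1),
        # with xi = (eta + eps) / 2 and zeta = (eps - eta) / (2 * sqrt(ln(2))),
        # so elements are created while the calcium trace Ca lies between eta
        # and eps (the target activity) and are deleted outside that band.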
# Excitatory synaptic elements of excitatory neurons
self.growth_curve_e_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.05, # Ca2+
}
# Inhibitory synaptic elements of excitatory neurons
self.growth_curve_e_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_e_e['eps'], # Ca2+
}
# Excitatory synaptic elements of inhibitory neurons
self.growth_curve_i_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0004, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.2, # Ca2+
}
# Inhibitory synaptic elements of inhibitory neurons
self.growth_curve_i_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_i_e['eps'] # Ca2+
}
# Now we specify the neuron model.
self.model_params = {'tau_m': 10.0, # membrane time constant (ms)
# excitatory synaptic time constant (ms)
'tau_syn_ex': 0.5,
# inhibitory synaptic time constant (ms)
'tau_syn_in': 0.5,
't_ref': 2.0, # absolute refractory period (ms)
'E_L': -65.0, # resting membrane potential (mV)
'V_th': -50.0, # spike threshold (mV)
'C_m': 250.0, # membrane capacitance (pF)
'V_reset': -65.0 # reset potential (mV)
}
self.nodes_e = None
self.nodes_i = None
self.mean_ca_e = []
self.mean_ca_i = []
self.total_connections_e = []
self.total_connections_i = []
####################################################################################
# We initialize variables for the post-synaptic currents of the
# excitatory, inhibitory, and external synapses. These values were
# calculated from a PSP amplitude of 1 for excitatory synapses,
# -1 for inhibitory synapses and 0.11 for external synapses.
self.psc_e = 585.0
self.psc_i = -585.0
self.psc_ext = 6.2
def prepare_simulation(self):
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
####################################################################################
# We set global kernel parameters. Here we define the resolution
# for the simulation, which is also the time resolution for the update
# of the synaptic elements.
nest.SetKernelStatus(
{
'resolution': self.dt
}
)
####################################################################################
# Set Structural Plasticity synaptic update interval which is how often
# the connectivity will be updated inside the network. It is important
# to notice that synaptic elements and connections change on different
# time scales.
nest.SetStructuralPlasticityStatus({
'structural_plasticity_update_interval': self.update_interval,
})
####################################################################################
# Now we define Structural Plasticity synapses. In this example we create
# two synapse models, one for excitatory and one for inhibitory synapses.
# Then we define that excitatory synapses can only be created between a
# pre-synaptic element called `Axon_ex` and a post synaptic element
# called `Den_ex`. In a similar manner, synaptic elements for inhibitory
# synapses are defined.
nest.CopyModel('static_synapse', 'synapse_ex')
nest.SetDefaults('synapse_ex', {'weight': self.psc_e, 'delay': 1.0})
nest.CopyModel('static_synapse', 'synapse_in')
nest.SetDefaults('synapse_in', {'weight': self.psc_i, 'delay': 1.0})
nest.SetStructuralPlasticityStatus({
'structural_plasticity_synapses': {
'synapse_ex': {
'synapse_model': 'synapse_ex',
'post_synaptic_element': 'Den_ex',
'pre_synaptic_element': 'Axon_ex',
},
'synapse_in': {
'synapse_model': 'synapse_in',
'post_synaptic_element': 'Den_in',
'pre_synaptic_element': 'Axon_in',
},
}
})
def create_nodes(self):
"""
Assign growth curves to synaptic elements
"""
synaptic_elements = {
'Den_ex': self.growth_curve_e_e,
'Den_in': self.growth_curve_e_i,
'Axon_ex': self.growth_curve_e_e,
}
synaptic_elements_i = {
'Den_ex': self.growth_curve_i_e,
'Den_in': self.growth_curve_i_i,
'Axon_in': self.growth_curve_i_i,
}
####################################################################################
# Then it is time to create a population with 80% of the total network
# size excitatory neurons and another one with 20% of the total network
# size of inhibitory neurons.
self.nodes_e = nest.Create('iaf_psc_alpha',
self.number_excitatory_neurons,
{'synaptic_elements': synaptic_elements})
self.nodes_i = nest.Create('iaf_psc_alpha',
self.number_inhibitory_neurons,
{'synaptic_elements': synaptic_elements_i})
self.nodes_e.synaptic_elements = synaptic_elements
self.nodes_i.synaptic_elements = synaptic_elements_i
def connect_external_input(self):
"""
We create and connect the Poisson generator for external input
"""
noise = nest.Create('poisson_generator')
noise.rate = self.bg_rate
nest.Connect(noise, self.nodes_e, 'all_to_all',
{'weight': self.psc_ext, 'delay': 1.0})
nest.Connect(noise, self.nodes_i, 'all_to_all',
{'weight': self.psc_ext, 'delay': 1.0})
####################################################################################
# In order to save the amount of average calcium concentration in each
# population through time we create the function ``record_ca``. Here we use the
# ``GetStatus`` function to retrieve the value of `Ca` for every neuron in the
# network and then store the average.
def record_ca(self):
ca_e = self.nodes_e.Ca, # Calcium concentration
self.mean_ca_e.append(numpy.mean(ca_e))
ca_i = self.nodes_i.Ca, # Calcium concentration
self.mean_ca_i.append(numpy.mean(ca_i))
####################################################################################
# In order to save the state of the connectivity in the network through time
# we create the function ``record_connectivity``. Here we use the ``GetStatus``
# function to retrieve the number of connected pre-synaptic elements of each
# neuron. The total amount of excitatory connections is equal to the total
# amount of connected excitatory pre-synaptic elements. The same applies for
# inhibitory connections.
def record_connectivity(self):
syn_elems_e = self.nodes_e.synaptic_elements
syn_elems_i = self.nodes_i.synaptic_elements
self.total_connections_e.append(sum(neuron['Axon_ex']['z_connected']
for neuron in syn_elems_e))
self.total_connections_i.append(sum(neuron['Axon_in']['z_connected']
for neuron in syn_elems_i))
####################################################################################
# We define a function to plot the recorded values
# at the end of the simulation.
def plot_data(self):
fig, ax1 = plt.subplots()
ax1.axhline(self.growth_curve_e_e['eps'],
linewidth=4.0, color='#9999FF')
ax1.plot(self.mean_ca_e, 'b',
label='Ca Concentration Excitatory Neurons', linewidth=2.0)
ax1.axhline(self.growth_curve_i_e['eps'],
linewidth=4.0, color='#FF9999')
ax1.plot(self.mean_ca_i, 'r',
label='Ca Concentration Inhibitory Neurons', linewidth=2.0)
ax1.set_ylim([0, 0.275])
ax1.set_xlabel("Time in [s]")
ax1.set_ylabel("Ca concentration")
ax2 = ax1.twinx()
ax2.plot(self.total_connections_e, 'm',
label='Excitatory connections', linewidth=2.0, linestyle='--')
ax2.plot(self.total_connections_i, 'k',
label='Inhibitory connections', linewidth=2.0, linestyle='--')
ax2.set_ylim([0, 2500])
ax2.set_ylabel("Connections")
ax1.legend(loc=1)
ax2.legend(loc=4)
plt.savefig('StructuralPlasticityExample.eps', format='eps')
####################################################################################
# It is time to specify how we want to perform the simulation. In this
# function we first enable structural plasticity in the network and then we
# simulate in steps. On each step we record the calcium concentration and the
# connectivity. At the end of the simulation, the plot of connections and
# calcium concentration through time is generated.
def simulate(self):
if nest.NumProcesses() > 1:
sys.exit("For simplicity, this example only works " +
"for a single process.")
nest.EnableStructuralPlasticity()
print("Starting simulation")
sim_steps = numpy.arange(0, self.t_sim, self.record_interval)
for i, step in enumerate(sim_steps):
nest.Simulate(self.record_interval)
self.record_ca()
self.record_connectivity()
if i % 20 == 0:
print("Progress: " + str(i / 2) + "%")
print("Simulation finished successfully")
####################################################################################
# Finally we take all the functions that we have defined and create the sequence
# for our example. We prepare the simulation, create the nodes for the network,
# connect the external input and then simulate. Please note that as we are
# simulating 200 biological seconds in this example, it will take a few minutes
# to complete.
if __name__ == '__main__':
    example = StructuralPlasticityExample()
# Prepare simulation
example.prepare_simulation()
example.create_nodes()
example.connect_external_input()
# Start simulation
example.simulate()
example.plot_data()
| gpl-2.0 |
kirel/political-affiliation-prediction | newsreader.py | 2 | 11936 | # -*- coding: utf-8 -*-
from sklearn.decomposition import KernelPCA
from sklearn.metrics.pairwise import pairwise_distances
from scipy.stats.mstats import zscore
import glob
import json
import re
import datetime
import os
import cPickle
import codecs
import itertools
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy import double,triu,ones,hstack,arange,reshape,zeros,setdiff1d,array,eye,argmax,percentile
def get_news(sources=['spiegel','faz','welt','zeit'], folder='model'):
'''
Collects all news articles from political ressort of major German newspapers
Articles are transformed to BoW vectors and assigned to a political party
For better visualization, articles' BoW vectors are also clustered into topics
INPUT
folder the model folder containing classifier and BoW transformer
sources a list of strings for each newspaper for which a crawl is implemented
default ['zeit','sz']
'''
import classifier
from bs4 import BeautifulSoup
from api import fetch_url
import urllib2
news = dict([(source,[]) for source in sources])
# the classifier for prediction of political affiliation
clf = classifier.Classifier(folder=folder)
for source in sources:
        if source == 'spiegel':
# fetching articles from sueddeutsche.de/politik
url = 'http://www.spiegel.de/politik'
site = BeautifulSoup(urllib2.urlopen(url).read())
titles = site.findAll("div", { "class" : "teaser" })
urls = ['http://www.spiegel.de'+a.findNext('a')['href'] for a in titles]
        if source == 'faz':
# fetching articles from sueddeutsche.de/politik
url = 'http://www.faz.net/aktuell/politik'
site = BeautifulSoup(urllib2.urlopen(url).read())
titles = site.findAll("a", { "class" : "TeaserHeadLink" })
urls = ['http://www.faz.net'+a['href'] for a in titles]
        if source == 'welt':
# fetching articles from sueddeutsche.de/politik
url = 'http://www.welt.de/politik'
site = BeautifulSoup(urllib2.urlopen(url).read())
titles = site.findAll("a", { "class" : "as_teaser-kicker" })
urls = [a['href'] for a in titles]
        if source == 'sz-without-readability':
# fetching articles from sueddeutsche.de/politik
url = 'http://www.sueddeutsche.de/politik'
site = BeautifulSoup(urllib2.urlopen(url).read())
titles = site.findAll("div", { "class" : "teaser" })
urls = [a.findNext('a')['href'] for a in titles]
        if source == 'zeit':
# fetching articles from zeit.de/politik
url = 'http://www.zeit.de/politik'
site = BeautifulSoup(urllib2.urlopen(url).read())
titles = site.findAll("span", { "class" : "supertitle" })
urls = [a.parent['href'] for a in titles if a.parent['href'].find('/2015-')>0]
print "Found %d articles on %s"%(len(urls),url)
# predict party from url for this source
print "Predicting %s"%source
articles = []
for url in urls:
try:
title,text = fetch_url(url)
prediction = clf.predict(text)
prediction['url'] = url
articles.append((title,prediction))
except:
print('Could not get text from %s'%url)
pass
news[source] = dict(articles)
# save results
datestr = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
open(folder+'/news-%s'%(datestr) + '.json', 'wb').write(json.dumps(news,ensure_ascii=False).encode('utf8'))
def all_saved_news(folder='model'):
import glob
from string import digits
# get just the most recent news articles file (assuming date label ordering)
news = json.load(open(glob.glob(folder+'/news*.json')[-1],"r"))
# collect text data from all articles
articles, data = [], []
for source in news.keys():
for title, article in news[source].items():
# remove numbers
for d in digits: article['text'] = article['text'].replace(d,'')
data.append(article['text'])
predictions = [prediction['probability'] for prediction in article['prediction']]
articles.append({
'source':source,
'title':title,
'url':article['url'],
'prediction':article['prediction'],
'predictedLabel':article['prediction'][argmax(predictions)]['party']
})
return articles, data
def pairwise_dists(data, nneighbors=10, folder='model', dist='l2'):
'''
Computes pairwise distances between bag-of-words vectors of articles
INPUT
folder model folder
nneighbors number of closest neighbors to include in distance list
'''
stopwords = codecs.open("stopwords.txt", "r", encoding="utf-8", errors='ignore').readlines()[5:]
stops = map(lambda x:x.lower().strip(),stopwords)
# using now stopwords and filtering out digits
bow = TfidfVectorizer(min_df=2,stop_words=stops)
X = bow.fit_transform(data)
print 'Computing %s pairwise distances'%dist
# KPCA transform bow vectors
if dist is 'l2_kpca_zscore':
K = pairwise_distances(X,metric='l2',n_jobs=1)
perc = 50.0
width = percentile(K.flatten(),perc)
Xc = zscore(KernelPCA(n_components=50,kernel='rbf',gamma=width).fit_transform(X))
K = pairwise_distances(Xc,metric='l2',n_jobs=1)
elif dist is 'l2_kpca':
K = pairwise_distances(X,metric='l2',n_jobs=1)
perc = 100./len(data)
width = percentile(K.flatten(),perc)
Xc = KernelPCA(n_components=50,kernel='rbf',gamma=width).fit_transform(X)
K = pairwise_distances(Xc,metric='l2',n_jobs=1)
elif dist is 'l2':
K = pairwise_distances(X,metric='l2',n_jobs=1)
elif dist is 'l1':
K = pairwise_distances(X,metric='l1',n_jobs=1)
# collect closest neighbors
distances = []
for urlidx in range(len(data)):
idx = (K[urlidx,:]).argsort()[1:nneighbors+1]
for sidx in idx:
distances.append([urlidx,sidx,(idx==sidx).nonzero()[0][0]])
return distances
def load_sentiment(negative='SentiWS_v1.8c/SentiWS_v1.8c_Negative.txt',\
positive='SentiWS_v1.8c/SentiWS_v1.8c_Positive.txt'):
words = dict()
for line in open(negative).readlines():
parts = line.strip('\n').split('\t')
words[parts[0].split('|')[0]] = double(parts[1])
if len(parts)>2:
for inflection in parts[2].strip('\n').split(','):
words[inflection] = double(parts[1])
for line in open(positive).readlines():
parts = line.strip('\n').split('\t')
words[parts[0].split('|')[0]] = double(parts[1])
if len(parts)>2:
for inflection in parts[2].strip('\n').split(','):
words[inflection] = double(parts[1])
return words
def get_sentiments(data):
# filtering out some noise words
stops = map(lambda x:x.lower().strip(),open('stopwords.txt').readlines()[6:])
# vectorize non-stopwords
bow = TfidfVectorizer(min_df=2,stop_words=stops)
X = bow.fit_transform(data)
# map sentiment vector to bow space
words = load_sentiment()
sentiment_vec = zeros(X.shape[1])
for key in words.keys():
if bow.vocabulary_.has_key(key):
sentiment_vec[bow.vocabulary_[key]] = words[key]
# compute sentiments
return X.dot(sentiment_vec)
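# Worked toy illustration (editorial addition; numbers are made up): with rows
# of X as documents and sentiment_vec holding one polarity weight per
# vocabulary entry, X.dot(sentiment_vec) yields one score per document, e.g.
#
#     X = array([[1., 0.], [0., 2.], [1., 1.]])
#     X.dot(array([0.5, -0.3]))      # -> array([ 0.5, -0.6,  0.2])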
def kpca_cluster(data,nclusters=100,ncomponents=40,topwhat=10,zscored=False):
'''
Computes clustering of bag-of-words vectors of articles
INPUT
folder model folder
nclusters number of clusters
'''
from sklearn.cluster import KMeans
# filtering out some noise words
stops = map(lambda x:x.lower().strip(),open('stopwords.txt').readlines()[6:])
# vectorize non-stopwords
bow = TfidfVectorizer(min_df=2,stop_words=stops)
X = bow.fit_transform(data)
# creating bow-index-to-word map
idx2word = dict(zip(bow.vocabulary_.values(),bow.vocabulary_.keys()))
# using now stopwords and filtering out digits
print 'Computing pairwise distances'
K = pairwise_distances(X,metric='l2',n_jobs=1)
perc = 50.0
width = percentile(K.flatten(),perc)
# KPCA transform bow vectors
Xc = KernelPCA(n_components=ncomponents,kernel='rbf',gamma=width).fit_transform(X)
if zscored:
Xc = zscore(Xc)
# compute clusters
km = KMeans(n_clusters=nclusters).fit(Xc)
Xc = km.predict(Xc)
clusters = []
for icluster in range(nclusters):
nmembers = (Xc==icluster).sum()
if True:#nmembers < len(data) / 5.0 and nmembers > 1: # only group clusters big enough but not too big
members = (Xc==icluster).nonzero()[0]
topwordidx = array(X[members,:].sum(axis=0))[0].argsort()[-topwhat:][::-1]
topwords = ' '.join([idx2word[wi] for wi in topwordidx])
meanDist = triu(pairwise_distances(X[members,:],metric='l2',n_jobs=1)).sum()
meanDist = meanDist / (len(members) + (len(members)**2 - len(members))/2.0)
# print u'Cluster %d'%icluster + u' %d members'%nmembers + u' mean Distance %f'%meanDist + u'\n\t'+topwords
clusters.append({
'name':'Cluster-%d'%icluster,
'description': topwords,
'members': list(members),
'meanL2Distances': meanDist
})
return clusters
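# Minimal self-contained sketch (editorial addition, never called by the
# pipeline) of the same KernelPCA + KMeans idea on random data; all sizes
# below are illustrative, not values used in this project.
def _kpca_kmeans_demo(n_docs=20, n_features=50, nclusters=4):
    import numpy
    from sklearn.cluster import KMeans
    # random non-negative "bag-of-words-like" matrix
    X = abs(numpy.random.randn(n_docs, n_features))
    # kernel width from the median pairwise distance, as in kpca_cluster above
    K = pairwise_distances(X, metric='l2', n_jobs=1)
    width = percentile(K.flatten(), 50.0)
    Xc = KernelPCA(n_components=5, kernel='rbf', gamma=width).fit_transform(X)
    return KMeans(n_clusters=nclusters).fit_predict(Xc)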
def party_cluster(articles):
clusters = []
keyf = lambda a: a[1]['predictedLabel']
for k, group in itertools.groupby(sorted(enumerate(articles), key=keyf), keyf):
clusters.append({
'name': k,
'description': k,
'members': [index_article_tuple[0] for index_article_tuple in group]
})
return clusters
def write_distances_json(folder='model'):
articles, data = all_saved_news(folder)
dists = ['l2_kpca']
distances_json = {
'articles': articles,
'sentiments': json.dumps(get_sentiments(data).tolist()),
'distances': [
{ 'name': dist, 'distances': pairwise_dists(data,dist = dist) } for dist in dists
],
'clusterings': [
{ 'name': 'Parteivorhersage', 'clusters': party_cluster(articles) },
{ 'name': 'Ähnlichkeit', 'clusters': kpca_cluster(data,nclusters=len(articles)/2,ncomponents=40,zscored=False) },
]
}
# save article with party prediction and distances to closest articles
datestr = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
open(folder+'/distances-%s'%(datestr)+'.json', 'wb').write(json.dumps(distances_json))
# also save that latest version for the visualization
open(folder+'/distances.json', 'wb').write(json.dumps(distances_json))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(\
description='Downloads, transforms and clusters news articles')
parser.add_argument('-f','--folder',help='Folder to store text files [./model]',\
default='model')
parser.add_argument('-d','--download',help='If files should be downloaded',\
action='store_true', default=False)
parser.add_argument('-p','--distances',help='If pairwise distances of text should be computed',\
action='store_true', default=False)
args = vars(parser.parse_args())
if not os.path.isdir(args['folder']):
os.mkdir(args['folder'])
if args['download']:
get_news(folder=args['folder'])
if args['distances']:
write_distances_json(folder=args['folder'])
| mit |
ARM-software/lisa | lisa/analysis/thermal.py | 2 | 7490 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2017, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from matplotlib.ticker import MaxNLocator
from devlib.utils.misc import list_to_mask, mask_to_list
from lisa.analysis.base import TraceAnalysisBase
from lisa.utils import memoized
from lisa.trace import requires_events, CPU
from lisa.datautils import df_refit_index, series_refit_index
class ThermalAnalysis(TraceAnalysisBase):
"""
Support for plotting Thermal Analysis data
:param trace: input Trace object
:type trace: :class:`trace.Trace`
"""
name = 'thermal'
@requires_events("thermal_temperature")
def df_thermal_zones_temperature(self):
"""
Get the temperature of the thermal zones
:returns: a :class:`pandas.DataFrame` with:
* An ``id`` column (The thermal zone ID)
* A ``thermal_zone`` column (The thermal zone name)
* A ``temp`` column (The reported temperature)
"""
df = self.trace.df_event("thermal")
df = df[['id', 'thermal_zone', 'temp']]
return df
@TraceAnalysisBase.cache
@requires_events("thermal_power_cpu_limit")
def df_cpufreq_cooling_state(self, cpus=None):
"""
Get cpufreq cooling device states
:param cpus: The CPUs to consider (all by default)
:type cpus: list(int)
:returns: a :class:`pandas.DataFrame` with:
* An ``cpus`` column (The CPUs affected by the cooling device)
* A ``freq`` column (The frequency limit)
* A ``cdev_state`` column (The cooling device state index)
"""
df = self.trace.df_event("thermal_power_cpu_limit")
df = df[['cpus', 'freq', 'cdev_state']]
if cpus is not None:
# Find masks that match the requested CPUs
# This can include other CPUs
masks = self._matching_masks(cpus)
df = df[df.cpus.isin(masks)]
return df
@TraceAnalysisBase.cache
@requires_events("thermal_power_devfreq_limit")
def df_devfreq_cooling_state(self, devices=None):
"""
Get devfreq cooling device states
:param devices: The devfreq devices to consider (all by default)
        :type devices: list(str)
:returns: a :class:`pandas.DataFrame` with:
* An ``cpus`` column (The CPUs affected by the cooling device)
* A ``freq`` column (The frequency limit)
* A ``cdev_state`` column (The cooling device state index)
"""
df = self.trace.df_event("devfreq_out_power")
df = df[['type', 'freq', 'cdev_state']]
if devices is not None:
df = df[df.type.isin(devices)]
return df
@property
@memoized
@df_thermal_zones_temperature.used_events
def thermal_zones(self):
"""
Get thermal zone ids that appear in the trace
"""
df = self.df_thermal_zones_temperature()
return df["thermal_zone"].unique().tolist()
@property
@memoized
@df_cpufreq_cooling_state.used_events
def cpufreq_cdevs(self):
"""
Get cpufreq cooling devices that appear in the trace
"""
df = self.df_cpufreq_cooling_state()
res = df['cpus'].unique().tolist()
return [mask_to_list(mask) for mask in res]
@property
@memoized
@df_devfreq_cooling_state.used_events
def devfreq_cdevs(self):
"""
Get devfreq cooling devices that appear in the trace
"""
df = self.df_devfreq_cooling_state()
return df['type'].unique().tolist()
###############################################################################
# Plotting Methods
###############################################################################
@TraceAnalysisBase.plot_method()
@df_thermal_zones_temperature.used_events
def plot_thermal_zone_temperature(self, thermal_zone_id: int, axis, local_fig):
"""
Plot temperature of thermal zones (all by default)
:param thermal_zone_id: ID of the zone
:type thermal_zone_id: int
"""
window = self.trace.window
df = self.df_thermal_zones_temperature()
df = df[df.id == thermal_zone_id]
df = df_refit_index(df, window=window)
tz_name = df.thermal_zone.unique()[0]
series = series_refit_index(df['temp'], window=window)
series.plot(drawstyle="steps-post", ax=axis,
label=f"Thermal zone \"{tz_name}\"")
axis.legend()
if local_fig:
axis.grid(True)
axis.set_title("Temperature evolution")
axis.set_ylabel("Temperature (°C.10e3)")
@TraceAnalysisBase.plot_method()
@df_cpufreq_cooling_state.used_events
def plot_cpu_cooling_states(self, cpu: CPU, axis, local_fig):
"""
Plot the state evolution of a cpufreq cooling device
:param cpu: The CPU. Whole clusters can be controlled as
a single cooling device, they will be plotted as long this CPU
belongs to the cluster.
:type cpu: int
"""
window = self.trace.window
df = self.df_cpufreq_cooling_state([cpu])
df = df_refit_index(df, window=window)
cdev_name = f"CPUs {mask_to_list(df.cpus.unique()[0])}"
series = series_refit_index(df['cdev_state'], window=window)
series.plot(drawstyle="steps-post", ax=axis,
label=f"\"{cdev_name}\"")
axis.legend()
if local_fig:
axis.grid(True)
axis.set_title("cpufreq cooling devices status")
axis.yaxis.set_major_locator(MaxNLocator(integer=True))
axis.grid(axis='y')
@TraceAnalysisBase.plot_method()
def plot_dev_freq_cooling_states(self, device: str, axis, local_fig):
"""
Plot the state evolution of a devfreq cooling device
:param device: The devfreq devices to consider
:type device: str
"""
df = self.df_devfreq_cooling_state([device])
df = df_refit_index(df, window=self.trace.window)
df['cdev_state'].plot(drawstyle="steps-post", ax=axis,
label=f"Device \"{device}\"")
axis.legend()
if local_fig:
axis.grid(True)
axis.set_title("devfreq cooling devices status")
axis.yaxis.set_major_locator(MaxNLocator(integer=True))
axis.grid(axis='y')
###############################################################################
# Utility Methods
###############################################################################
def _matching_masks(self, cpus):
df = self.trace.df_event('thermal_power_cpu_limit')
global_mask = list_to_mask(cpus)
cpumasks = df['cpus'].unique().tolist()
return [m for m in cpumasks if m & global_mask]
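# Illustrative example: cooling devices record the CPUs they cover as a
# bitmask, so matching is a bitwise AND. Assuming list_to_mask([0, 1]) gives
# 0b0011, a device covering CPUs 1 and 2 has mask 0b0110; 0b0011 & 0b0110 is
# non-zero, so that device is kept even though it also affects a CPU that was
# not requested.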
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| apache-2.0 |
idlead/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
xuyifan0720/image-processor | photo_editor.py | 1 | 7183 | import numpy as np
import cv2
from matplotlib import pyplot as plt
import os
from Tkinter import *
from PIL import Image, ImageStat
import math
import argparse
class PhotoEditor:
def __init__(self, directory_name, destination, brightness,blur):
self.img = None
self.imageFile = None
self.directory = directory_name
self.destination = destination
self.brightness = int(brightness)
self.index = 1
self.blur = blur
self.contrastConstant = 0
def maskCreation(self, img):
# converting to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
lower = np.array([130, 130, 70], dtype = "uint8")
upper = np.array([255, 175, 135], dtype = "uint8")
#masks ppl's face skin
mask = cv2.inRange(ycrcb, lower, upper)
lowerhsv = np.array([7, 90, 100], dtype="uint8")
upperhsv = np.array([14, 255, 255], dtype="uint8")
#masks ppl's face skin
hsv = cv2.GaussianBlur(hsv,(3,3),0)
mask2 = cv2.inRange(hsv, lowerhsv, upperhsv)
mask += mask2
# remove noise
img2 = cv2.GaussianBlur(gray,(3,3),0)
#laplacian detects the edges
laplacian = cv2.Laplacian(img2,cv2.CV_64F,ksize=1,scale=0.02,delta=0)
sobel = cv2.Sobel(img2,cv2.CV_64F,1,1,ksize=1,scale = 0.02,delta=0)
#adds laplacian and mask you get edges on people's face
finalMask = cv2.bitwise_and(laplacian,laplacian,mask=mask)
finalMask2 = cv2.bitwise_and(sobel,sobel,mask=mask)
#img2gray = cv2.cvtColor(finalMask2,cv2.COLOR_HSV2BGR)
#newMask = cv2.threshold(finalMask2, 10, 255, cv2.THRESH_BINARY)
#appliedMask = cv2.bitwise_and(img,img,mask=finalMask)
#makes a new mask according to finalMask
for i in range(0,len(finalMask)):
for j in range(0,len(finalMask[0])):
if finalMask[i,j]<0.05:
mask[i,j]=0
else:
mask[i,j]=255
#if img[i,j,0]+img[i,j,1]+img[i,j,2]>300 and img[i,j,0]+img[i,j,1]+img[i,j,2]<350:
#mask[i,j]=0
mask = cv2.GaussianBlur(mask,(5,5),0)
return mask
def saveImg(self,img):
rand = self.index
self.index = self.index + 1
fileName = self.destination+"/Updated-"+os.path.basename(self.imageFile)+".jpg"
cv2.imwrite(fileName, img)
def pimpleRemoval(self,mask,img):
image = cv2.medianBlur(img, 17)
img1 = cv2.bitwise_and(image,image,mask=mask)
img2 = cv2.bitwise_and(img,img,mask=mask)
img2 = img-img2
final = img1+img2
return final
def adjust_gamma(self, image, gamma):
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
def contrast_adjustment(self,image,constant):
table1 = np.array([0.5**(1-constant)*(i/255.0)**constant*255 for i in np.arange(0,128)]).astype("uint8")
table2 = np.array([-0.5**(1-constant)*(1-i/255.0)**constant*255+255 for i in np.arange(128,256)]).astype("uint8")
table = np.append(table1,table2)
return cv2.LUT(image,table)
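# Roughly: both halves of the LUT meet at the midpoint, so the mapping keeps
# 0, ~128 and 255 fixed and reduces to the identity when constant == 1; for
# constant > 1 it becomes an S-curve that steepens contrast around mid-grey,
# and for constant < 1 it flattens it.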
def adjust(self):
result = self.img
for i in range(0,5):
currentBright = self.calcBrightness(result)
print("currentBright")
print(currentBright)
targetDiff = self.brightness-currentBright
adjust_constant = (targetDiff)*0.01+1
cv2_im = cv2.cvtColor(result,cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im)
gs = (math.sqrt(0.241*(r**2) + 0.691*(g**2) + 0.068*(b**2))for r,g,b in pil_im.getdata())
gslist = list(gs)
diff = float(max(gslist)-min(gslist))
#adjust_constant = adjust_constant+(diff-100)/400
print("adjust_constant")
print(adjust_constant)
result = self.adjust_gamma(result,adjust_constant)
print("first round of process done")
oldBright = self.calcBrightness(self.img)
newBright = self.calcBrightness(result)
constant = (newBright/oldBright)
#result = self.saturation_adjust(result,constant)
print("saturation done")
mask = self.maskCreation(self.img)
print("mask creation done")
if self.blur == "yes":
result = self.pimpleRemoval(mask,result)
result = self.contrast_adjustment(result,constant)
self.saveImg(result)
def testLimit(self,n):
if n > 255:
return 255
else:
return n
def saturation_adjust(self,img,constant):
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
hsv[:,:,1] = [self.testLimit(x*constant) for x in hsv[:,:,1]]
return cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
def calcBrightness(self,img):
cv2_im = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im)
stat = ImageStat.Stat(pil_im)
r,g,b = stat.rms
print("r is %f g is %f b is %f")%(r,g,b)
currentBright = math.sqrt(0.241*(r**2) + 0.691*(g**2) + 0.068*(b**2))
return currentBright
def blemishFix(self):
mask = self.maskCreation(self.img)
removed = self.pimpleRemoval(mask, self.img)
self.saveImg(removed)
def loop(self, command):
for files in os.walk(self.directory):
for pic in files:
for pics in pic:
if pics.endswith(".jpg"):
print("start processing")
print(pics)
pics = self.directory+"/"+pics
self.imageFile = pics
self.img = cv2.imread(pics)
command()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Arguments for UDI Transfer')
parser.add_argument('-s', action="store", dest="storage", required=True,
help="path where you store your source pictures.")
parser.add_argument('-d', action="store", dest="destination", required=True,
help="destination path where you store your updated pictures.")
parser.add_argument('-f', action="store", dest="brightness", required=True,
help="desired brightness.")
parser.add_argument('-g', action="store", dest="blur", required=False, default = "yes",
help="yes or no whether you want to do blemish adjust.")
args = parser.parse_args()
if not os.path.exists(args.storage):
print("Source storage folder doesn't exist!")
exit()
if not os.path.exists(args.destination):
os.mkdir(args.destination)
editor = PhotoEditor(args.storage, args.destination,args.brightness,args.blur)
editor.loop(editor.adjust)
| mit |
JeanKossaifi/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
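# hdr and fmt lay the remaining members out as a reST "simple table": rows of
# "=" delimit the table and each column is padded to its widest entry.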
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
vortex-ape/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transfomer):
gc.collect()
clf = clone(transfomer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X_dense,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
lhogstrom/ThornsInRoses | pubmed_grab/pubmed_api.py | 1 | 3160 | from Bio import Entrez
import numpy as np
import pandas as pd
### pull info from pubmed api
outDir = '/Users/hogstrom/Dropbox (MIT)/pubmed_scrp'
Entrez.email = "Your.Name.Here@example.org"
### search for specific email in abstract
# handle = Entrez.efetch(db="pubmed", id="25081398", rettype="abstract")
# search_term = ".com&Cambridge, MA"
# search_term = ".com&Boston"
#search_term = "Novartis&Cambridge, MA"
# search_term = "oncology&.com&Cambridge, MA"
search_term = "regulatory&.com&Cambridge, MA"
oFile = outDir + '/regulatory_tbl.txt'
# search_term = "Oncotype&Cambridge"
# oFile = outDir + '/oncotype_dx_tbl.txt'
handle = Entrez.esearch(db="pubmed", rettype="abstract", term=search_term,retmax=100)
# handle = Entrez.esearch(db="pubmed", rettype="abstract", term="Cambridge, MA",retmax=10)
record = Entrez.read(handle)
handle.close()
idList = record['IdList']
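# Entrez.esearch only returns the matching PubMed IDs (up to retmax); the
# full Medline records are fetched below with Entrez.efetch using this list.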
if len(idList) == 0:
print "no resutlts for query: " +search_term
else:
### retrieve article info
handle = Entrez.efetch(db="pubmed", id=idList, rettype="abstract")
abstract_records = Entrez.read(handle)
handle.close()
#loop through for each - save article info
articles_dict = {}
for a_rec in abstract_records:
article_dict = {}
if a_rec.has_key('BookDocument'): # if book skip
continue
# title
article_dict['journal'] = a_rec['MedlineCitation']['Article']['Journal']['Title']
# date
date_entry = a_rec['MedlineCitation']['Article']['Journal']['JournalIssue']['PubDate']
# date_entry = a_rec['MedlineCitation']['Article']['Journal']['JournalIssue']['PubDate']['MedlineDate']
# date_entry = a_rec['MedlineCitation']['Article']['ArticleDate']
if date_entry.has_key('Year'):
# article_dict['Date'] = date_entry[0]['Year']
article_dict['Date'] = date_entry['Year']
# title
art_title = a_rec['MedlineCitation']['Article']['ArticleTitle']
art_title = ''.join([i if ord(i) < 128 else ' ' for i in art_title]) #remove non ascii
art_title = art_title.encode('utf-8')
article_dict['Title'] = art_title
# authors
authorList = a_rec['MedlineCitation']['Article']['AuthorList']
author_number = len(authorList)
if authorList[-1].has_key('LastName'): # check author name is listed
article_dict['last_author'] = authorList[-1]['LastName']
if authorList[-1]['AffiliationInfo']:
article_dict['last_author_affiliation'] = authorList[-1]['AffiliationInfo'][0]['Affiliation']
pID = a_rec['MedlineCitation']['PMID']
articles_dict[str(pID)] = article_dict
aFrm = pd.DataFrame(articles_dict)
aFrm = aFrm.T # transpose
# save file
aFrm.to_csv(oFile,sep='\t', encoding='utf-8')
# journal_name = record[0]['MedlineCitation']['Article']['Journal']['Title']
# abstract_text = record[0]['MedlineCitation']['Article']['Abstract']
# read abstract
# handle = Entrez.efetch(db="pubmed", id="25081398", retmode="text", rettype="abstract")
# handle.read()
### desired output
#author, email, journal article, title, date, location, pubmed_id
| mit |
startcode/apollo | modules/tools/navigation/driving_behavior/plot_gps_path.py | 4 | 1469 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys
import pyproj
import matplotlib.pyplot as plt
projector = pyproj.Proj(proj='utm', zone=10, ellps='WGS84')
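# The Proj instance is called as projector(longitude, latitude) and returns
# UTM zone-10 easting/northing in metres, which is why vals[-1] (longitude)
# is passed first in the loop below.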
fig = plt.figure()
ax = plt.subplot2grid((1, 1), (0, 0))
styles = ['r-', 'b-']
i = 0
for fn in sys.argv[1:]:
X = []
Y = []
f = open(fn, 'r')
for line in f:
line = line.replace('\n', '')
vals = line.split(",")
if len(vals) < 3:
continue
print float(vals[-2]), float(vals[-1])
x, y = projector(float(vals[-1]), float(vals[-2]))
print x, y
X.append(x)
Y.append(y)
f.close()
ax.plot(X, Y, styles[i % len(styles)], lw=3, alpha=0.8)
i += 1
ax.axis('equal')
plt.show()
| apache-2.0 |
jblackburne/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This examples shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| bsd-3-clause |
dschien/PyExcelModelingHelper | tests/test_parameter.py | 1 | 4438 | import unittest
import pandas as pd
from excel_helper import Parameter, DistributionFunctionGenerator, GrowthTimeSeriesGenerator
from scipy import stats
class ParameterTestCase(unittest.TestCase):
def test_distribution_generate_values(self):
p = Parameter('test', module_name='numpy.random', distribution_name='normal', param_a=0, param_b=.1)
settings = {'sample_size': 32}
a = p(settings)
assert abs(stats.shapiro(a)[0] - 0.9) < 0.1
def test_ExponentialGrowthTimeSeriesDistributionFunctionParameter_generate_values(self):
p = Parameter('test', module_name='numpy.random', distribution_name='normal',
param_a=0,
param_b=.1)
settings = {
'use_time_series': True,
'times': pd.date_range('2009-01-01',
'2009-03-01',
freq='MS'),
'sample_size': 5,
'cagr': 0.1}
a = p(settings)
print(a)
# assert abs(stats.shapiro(a)[0] - 0.9) < 0.1
def test_ExponentialGrowthTimeSeriesDistributionFunctionParameter_generate_values_uniform(self):
p = Parameter('test', module_name='numpy.random', distribution_name='uniform',
param_a=1,
param_b=2)
settings = {
'use_time_series': True,
'times': pd.date_range('2009-01-01',
'2009-03-01',
freq='MS'),
'sample_size': 5,
'cagr': 1}
a = p(settings)
print(a)
# assert abs(stats.shapiro(a)[0] - 0.9) < 0.1
def test_ExponentialGrowthTimeSeriesDistributionFunctionParameter_generate_values_uniform_mean(self):
p = Parameter('test', module_name='numpy.random', distribution_name='uniform',
param_a=1, param_b=2, cagr=.1)
settings = {
'use_time_series': True,
'times': pd.date_range('2009-01-01', '2010-01-01', freq='MS'),
'sample_size': 1,
'sample_mean_value': True}
a = p(settings)
assert a.iloc[0] * 1.1 == a.iloc[-1]
def test_normal_zero_variance(self):
p = Parameter('a', module_name='numpy.random', distribution_name='normal', param_a=0,
param_b=0, )
q = Parameter('b', module_name='numpy.random', distribution_name='normal', param_a=0,
param_b=0)
settings = {'sample_size': 64, }
a = p(settings) * q(settings)
# print(a)
assert abs(stats.shapiro(a)[0] - 0.9) < 0.1
def test_get_mean_uniform(self):
p = Parameter('a', module_name='numpy.random', distribution_name='uniform', param_a=2,
param_b=4, )
settings = {
'sample_size': 5,
'sample_mean_value': True}
val = p(settings)
# print(val)
assert (val == 3).all()
def test_get_mean_normal_timeseries(self):
p = Parameter('test', module_name='numpy.random', distribution_name='normal',
param_a=3.,
param_b=.1, )
settings = {
'use_time_series': True,
'times': pd.date_range('2009-01-01',
'2009-03-01',
freq='MS'),
'sample_size': 5,
'cagr': 0,
'sample_mean_value': True}
val = p(settings)
# print(val)
assert (val == 3).all()
def test_get_mean_normal(self):
p = Parameter('a', module_name='numpy.random', distribution_name='normal', param_a=3, param_b=4, )
val = p()
# print(val)
# assert (val == 3).all()
def test_get_mean_choice(self):
p = Parameter('a', module_name='numpy.random', distribution_name='choice', param_a=3, param_b=4, )
settings = {'sample_mean_value': True, 'sample_size': 3}
val = p(settings)
# print(val)
assert (val == 3.5).all()
def test_get_mean_numerically(self):
p = Parameter('a', module_name='numpy.random', distribution_name='normal', param_a=3,
param_b=4)
settings = {'sample_mean_value': True, 'sample_size': 3}
val = p(settings)
# print(val)
assert (val == 3).all()
if __name__ == '__main__':
unittest.main()
| mit |
hugobuddel/orange3 | Orange/regression/linear_bfgs.py | 2 | 4661 | import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from Orange.regression import Learner, Model
from Orange.preprocess import Normalize, Continuize, Impute, RemoveNaNColumns
__all__ = ["LinearRegressionLearner"]
class LinearRegressionLearner(Learner):
'''L2 regularized linear regression (a.k.a Ridge regression)
This model uses the L-BFGS algorithm to minimize the linear least
squares penalty with L2 regularization. When using this model you
should:
- Choose a suitable regularization parameter lambda_
- Consider appending a column of ones to the dataset (intercept term)
Parameters
----------
lambda\_ : float, optional (default=1.0)
Regularization parameter. It controls trade-off between fitting the
data and keeping parameters small. Higher values of lambda\_ force
parameters to be smaller.
preprocessors : list, optional (default=[Normalize(), Continuize(), Impute(), RemoveNaNColumns()])
Preprocessors are applied to data before training or testing. Default preprocessors
- transform the dataset so that the columns are on a similar scale,
- continuize all discrete attributes,
- remove columns with all values as NaN
- replace NaN values with suitable values
fmin_args : dict, optional
Parameters for L-BFGS algorithm.
"""
Examples
--------
import numpy as np
from Orange.data import Table
from Orange.regression.linear_bfgs import LinearRegressionLearner
data = Table('housing')
data.X = np.hstack((data.X, np.ones((data.X.shape[0], 1)))) # append ones
m = LinearRegressionLearner(lambda_=1.0)
c = m(data) # fit
print(c(data)) # predict
'''
name = 'linear_bfgs'
preprocessors = [Normalize(),
Continuize(),
Impute(),
RemoveNaNColumns()]
def __init__(self, lambda_=1.0, preprocessors=None, **fmin_args):
super().__init__(preprocessors=preprocessors)
self.lambda_ = lambda_
self.fmin_args = fmin_args
def cost_grad(self, theta, X, y):
t = X.dot(theta) - y
cost = t.dot(t)
cost += self.lambda_ * theta.dot(theta)
cost /= 2.0 * X.shape[0]
grad = X.T.dot(t)
grad += self.lambda_ * theta
grad /= X.shape[0]
return cost, grad
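# In other words: cost(theta) = (||X.theta - y||^2 + lambda*||theta||^2) / (2*n)
# and grad(theta) = (X^T.(X.theta - y) + lambda*theta) / n, the ridge
# least-squares objective with its analytic gradient.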
def fit(self, X, Y, W):
if len(Y.shape) > 1 and Y.shape[1] > 1:
raise ValueError('Linear regression does not support '
'multi-target classification')
if np.isnan(np.sum(X)) or np.isnan(np.sum(Y)):
raise ValueError('Linear regression does not support '
'unknown values')
theta = np.zeros(X.shape[1])
theta, cost, ret = fmin_l_bfgs_b(self.cost_grad, theta,
args=(X, Y.ravel()), **self.fmin_args)
return LinearRegressionModel(theta)
class LinearRegressionModel(Model):
def __init__(self, theta):
self.theta = theta
def predict(self, X):
return X.dot(self.theta)
if __name__ == '__main__':
import Orange.data
import sklearn.cross_validation as skl_cross_validation
np.random.seed(42)
def numerical_grad(f, params, e=1e-4):
grad = np.zeros_like(params)
perturb = np.zeros_like(params)
for i in range(params.size):
perturb[i] = e
j1 = f(params - perturb)
j2 = f(params + perturb)
grad[i] = (j2 - j1) / (2.0 * e)
perturb[i] = 0
return grad
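# Central-difference approximation: grad[i] ~= (f(theta + e*e_i) -
# f(theta - e*e_i)) / (2*e), meant for the (commented-out) gradient check of
# cost_grad below.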
d = Orange.data.Table('housing')
d.X = np.hstack((d.X, np.ones((d.X.shape[0], 1))))
d.shuffle()
# m = LinearRegressionLearner(lambda_=1.0)
# print(m(d)(d))
# # gradient check
# m = LinearRegressionLearner(lambda_=1.0)
# theta = np.random.randn(d.X.shape[1])
#
# ga = m.cost_grad(theta, d.X, d.Y.ravel())[1]
# gm = numerical_grad(lambda t: m.cost_grad(t, d.X, d.Y.ravel())[0], theta)
#
# print(np.sum((ga - gm)**2))
for lambda_ in (0.01, 0.03, 0.1, 0.3, 1, 3):
m = LinearRegressionLearner(lambda_=lambda_)
scores = []
for tr_ind, te_ind in skl_cross_validation.KFold(d.X.shape[0]):
s = np.mean((m(d[tr_ind])(d[te_ind]) - d[te_ind].Y.ravel())**2)
scores.append(s)
print('{:5.2f} {}'.format(lambda_, np.mean(scores)))
m = LinearRegressionLearner(lambda_=0)
print('test data', np.mean((m(d)(d) - d.Y.ravel())**2))
print('majority', np.mean((np.mean(d.Y.ravel()) - d.Y.ravel())**2))
| gpl-3.0 |
samchrisinger/osf.io | scripts/analytics/email_invites.py | 55 | 1332 | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from utils import plot_dates, mkdirp
user_collection = database['user']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def analyze_email_invites():
invited = user_collection.find({'unclaimed_records': {'$ne': {}}})
dates_invited = [
user['date_registered']
for user in invited
]
if not dates_invited:
return
fig = plot_dates(dates_invited)
plt.title('email invitations ({} total)'.format(len(dates_invited)))
plt.savefig(os.path.join(FIG_PATH, 'email-invites.png'))
plt.close()
def analyze_email_confirmations():
confirmed = user_collection.find({
'unclaimed_records': {'$ne': {}},
'is_claimed': True,
})
dates_confirmed = [
user['date_confirmed']
for user in confirmed
]
if not dates_confirmed:
return
fig = plot_dates(dates_confirmed)
plt.title('confirmed email invitations ({} total)'.format(len(dates_confirmed)))
plt.savefig(os.path.join(FIG_PATH, 'email-invite-confirmations.png'))
plt.close()
def main():
analyze_email_invites()
analyze_email_confirmations()
if __name__ == '__main__':
main()
| apache-2.0 |
iyergkris/step-oracle | server/run.py | 1 | 5470 | #!/usr/bin/env python
"""
Created to collect the real-time step count from an iPhone, which in turn collects it from an Apple Watch.
Authors: Gopal Iyer, Vikas Iyer Github: @iyergkris, @vikasiyer
For: Numenta HTM Challenge Hackathon
11/3/2015: 14:05
Cleanup:
"""
import sys
import importlib
import datetime
import csv
import nupic_anomaly_output
import socket
from optparse import OptionParser
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.modelfactory import ModelFactory
DEFAULT_PLOT = False
DATETIME_FIELDNAME = 'datetime'
# Format in Input file: 2015/08/19 12:00:00
DATE_FORMAT = "%Y/%m/%d %H:%M:%S"
INPUT_FILE_NAME = "step-count-data"
DATA_DIR = "."
SERVER_ADDRESS = "192.168.210.166"
SERVER_PORT = 8888
# Options parsing.
parser = OptionParser(
usage="%prog [options]"
)
parser.add_option(
"-p",
"--plot",
action="store_true",
default=DEFAULT_PLOT,
dest="plot",
help="Plot results in matplotlib instead of writing to file "
"(requires matplotlib).")
parser.add_option(
"-l",
"--log",
action="store_true",
default=False,
dest="log",
help="Compute the log of anomaly likelihood "
"(this is more useful for plotting)")
def getModelParamsFromName(inputdataName):
importName = "model_params.%s_model_params" % (
inputdataName.replace(" ", "_").replace("-", "_")
)
print "Importing model params from %s" % importName
try:
importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% inputdataName)
return importedModelParams
def createModel(modelParams):
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": "steps"})
return model
"""
This is the main function that pushes data into the HTM and returns prediction and anomaly-score data.
The file-based data source is replaced below by the real-time step count coming from an iPhone and Apple Watch.
11/3/15: 14:05
Cleanup:
"""
def runModel(inputData, model, plot, logLikelihood):
inputFile = open(inputData, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
shifter = InferenceShifter()
if plot:
output = nupic_anomaly_output.NuPICPlotOutput([INPUT_FILE_NAME], logLikelihood)
else:
output = nupic_anomaly_output.NuPICFileOutput([INPUT_FILE_NAME], logLikelihood)
for row in csvReader:
timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
value = float(row[1])
if value is not None:
result = model.run({
"timestamp": timestamp,
"steps": value
})
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][1]
anomalyScore = result.inferences["anomalyScore"]
# print "Anomaly score %s" % anomalyScore
output.write(timestamp, value, prediction, anomalyScore)
inputFile.close()
"""
Below starts the code for opening a UDP socket on port 8888 and waiting for messages from the client.
On receiving a message from the client (iPhone), it is fed into the HTM model (no data format checks for now; hackathon code).
The model returns prediction and anomaly data. Only the prediction is sent back to the client (iPhone).
11/3/2015: 14:32:
Cleanup
"""
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to the port
server_address = (SERVER_ADDRESS, SERVER_PORT)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
while True:
print >>sys.stderr, '\nwaiting to receive message'
data, address = sock.recvfrom(4096)
print >>sys.stderr, 'received %s bytes from %s' % (len(data), address)
print >>sys.stderr, data
# code to feed into NuPIC HTM Neural machine
watch_time, watch_steps = data.split(",")
timestamp = datetime.datetime.strptime(watch_time, DATE_FORMAT)
value = float(watch_steps)
result = model.run({
"timestamp": timestamp,
"steps": value
})
# result.metrics = metricsManager.update(result)
# print ("Prediction=%f" % (result.metrics["multiStepBestPredictions:multiStep:"
# "errorMetric='altMAPE':steps=1:window=1000:"
# "field=kw_energy_consumption"]))
# predictForString = result.metrics["multiStepBestPredictions:multiStep:"
# "errorMetric='altMAPE':steps=1:window=1000:"
# "field=kw_energy_consumption"]
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][1]
print "Prediction: %s" % prediction
anomalyScore = result.inferences["anomalyScore"]
output.write(timestamp, value, prediction, anomalyScore)
if prediction:
sent = sock.sendto(str(prediction), address)
print >>sys.stderr, 'sent %s bytes back to %s' % (sent, address)
output.close()
if __name__ == "__main__":
(options, args) = parser.parse_args(sys.argv[1:])
plot = options.plot
# data = fetchData(url, river, stream, aggregate,
# {'limit': options.dataLimit})
# (min, max) = getMinMax(data, field)
modelParams = getModelParamsFromName(INPUT_FILE_NAME)
model = createModel(modelParams)
inputfilepath = "%s/%s.csv" % (DATA_DIR,INPUT_FILE_NAME.replace(" ","_"))
runModel(inputfilepath, model, plot, options.log)
| agpl-3.0 |
lmjohns3/speech-experiment | models/train-rica.py | 1 | 2008 | import climate
import lmj.plot as plt
import numpy as np
import seaborn as sns
import theanets
logging = climate.get_logger('rica')
import models
climate.add_arg('--codebook', metavar='FILE', help='save codebook to FILE')
climate.add_arg('--frames', type=int, metavar='T', help='train on sequences of T frames')
climate.add_arg('--overcomplete', type=float, default=2, metavar='K',
help='learn a Kx overcomplete codebook')
def main(args):
data = np.load(args.dataset, mmap_mode='r')
N = data.shape[1]
T = args.frames
K = int(N * T * args.overcomplete)
def batches():
batch = np.zeros((args.batch_size, N * T), 'f')
for b in range(args.batch_size):
o = np.random.randint(len(data) - T - 1)
batch[b] = data[o:o+T].ravel()
return [batch]
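# batches() is the callable handed to net.train() below; each call appears to
# draw args.batch_size random windows of T consecutive frames and flatten
# every window into a single N*T vector.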
net = theanets.Autoencoder([N * T, (K, 'linear'), (N * T, 'tied')])
net.train(batches,
monitors={'hid1:out': (-0.1, -0.01, 0.01, 0.1)},
**models.kwargs_from_args(args))
D = net.find('hid1', 'w').get_value().T
R = 6
C = 18
dimg = np.zeros((R * (N + 1) - 1, C * (T + 1) - 1), float)
bimg = np.zeros((R * (N + 1) - 1, C * (T + 1) - 1), float)
idx = abs(D).max(axis=1).argsort()[::-1]
for r in range(R):
for c in range(C):
o = np.random.randint(len(data) - T - 1)
dimg[r*(N+1):r*(N+1)+N, c*(T+1):c*(T+1)+T] = data[o:o+T].T
bimg[r*(N+1):r*(N+1)+N, c*(T+1):c*(T+1)+T] = D[idx[r * C + c]].reshape((T, N)).T
_, (dax, bax) = plt.subplots(1, 2)
dax.imshow(dimg, cmap='coolwarm')
sns.despine(ax=dax, left=True, bottom=True)
dax.set_xticks([])
dax.set_yticks([])
bax.imshow(bimg, cmap='coolwarm')
sns.despine(ax=bax, left=True, bottom=True)
bax.set_xticks([])
bax.set_yticks([])
plt.show()
logging.info('%s: saving %s %s', args.codebook, D.shape, D.dtype)
np.save(args.codebook, D)
if __name__ == '__main__':
climate.call(main)
| mit |
thientu/scikit-learn | sklearn/cluster/tests/test_k_means.py | 63 | 26190 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
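# The fixture above draws 100 samples around the 3 non-centered 5-D centers;
# the same data is exercised in dense (X) and sparse CSR (X_csr) form
# throughout these tests.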
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check the KMeans will work well, even if X is a fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should not longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers, which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
        # mini-batch k-means is very unstable on such a small dataset, hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
    # Check that increasing the number of inits increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignments are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
| bsd-3-clause |
vshtanko/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
e-koch/VLA_Lband | 14B-088/HI/analysis/co_comparison/co_hi_cloud_spectra.py | 1 | 8453 |
'''
Use the clean cloud sample from cloud_catalog.py to find the CO and HI spectra
over each cloud.
'''
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.console import ProgressBar
import astropy.units as u
from scipy import ndimage as nd
from spectral_cube import SpectralCube
from spectral_cube.lower_dimensional_structures import Projection
from spectral_cube.cube_utils import average_beams
from reproject import reproject_interp
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import Pool
import seaborn as sb
from signal_id import Noise
from cube_analysis.spectra_shifter import cube_shifter
from analysis.paths import (fourteenB_HI_data_path, iram_co21_data_path,
paper1_figures_path)
from analysis.constants import hi_freq, cube_name, moment1_name
from analysis.galaxy_params import gal
from analysis.plotting_styles import onecolumn_figure, align_yaxis
hi_cube = SpectralCube.read(fourteenB_HI_data_path(cube_name))
hi_beam = average_beams(hi_cube.beams)
hi_mom1 = fits.open(fourteenB_HI_data_path(moment1_name))[0]
# cloud_mask = \
# fits.open(iram_co21_data_path("m33.co21_new_assign_cprops_cleansample.fits"))[0]
# Improved cloud mask from Braine & Corbelli
cloud_mask_hdu = fits.open(iram_co21_data_path("asgn.fits"))[0]
# The cloud edges can be quite strict. Spectrally expand the mask one pixel
# in each direction
cloud_mask = nd.binary_dilation(cloud_mask_hdu.data > 0,
np.array([[[1, 1, 1]]]).T)
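# (the (3, 1, 1) structuring element grows the mask along axis 0 only, i.e. the
#  spectral axis, leaving the spatial footprint of each cloud unchanged)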
cube = SpectralCube.read(iram_co21_data_path("m33.co21_iram.fits"))
cube = cube.with_mask(cloud_mask)
# Make a SNR cube
# noise = Noise(cube)
# noise.estimate_noise(spectral_flat=True)
# noise.get_scale_cube()
# snr = SpectralCube(data=noise.snr.copy(), wcs=cube.wcs)
# Create a CO centroid map where the GMC mask is valid.
def peak_velocity(y, x, cube):
argmax = np.nanargmax(cube[:, y, x].value)
return cube.spectral_axis[argmax]
peak_intens = cube.max(0)
valid_mask = cloud_mask.sum(0) > 0
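# Interpolate the CO cube onto the HI spectral axis so both datasets share the same
# velocity channels before deriving centroid and peak velocities.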
cube_specinterp = cube.spectral_interpolate(hi_cube.spectral_axis)
mom1 = cube_specinterp.moment1()
# mom1[~peak_mask] = np.NaN
peak_vels_arr = np.zeros_like(mom1) * np.NaN
pbar = ProgressBar(len(zip(*np.where(valid_mask))))
for y, x in zip(*np.where(valid_mask)):
peak_vels_arr[y, x] = peak_velocity(y, x, cube_specinterp)
pbar.update()
# Moment 1 and peak vels need to be somewhat close to each other
# max_diff = 20 * u.km / u.s
# bad_peaks = np.abs(peak_vels_arr - mom1) > max_diff
# peak_vels_arr[bad_peaks] = np.NaN
peak_vels = Projection(peak_vels_arr, unit=mom1.unit, wcs=mom1.wcs)
peak_vels.write(iram_co21_data_path("m33.co21_iram.masked.peakvel.fits", no_check=True))
mom1_reproj_arr = reproject_interp(mom1.hdu,
hi_cube.wcs.celestial,
shape_out=hi_cube.shape[1:])[0]
mom1_reproj = Projection(mom1_reproj_arr, unit=mom1.unit,
wcs=hi_cube.wcs.celestial)
peak_vels_reproj_arr = \
reproject_interp(peak_vels.hdu,
hi_cube.wcs.celestial,
shape_out=hi_cube.shape[1:])[0]
peak_vels_reproj = Projection(peak_vels_reproj_arr, unit=mom1.unit,
wcs=hi_cube.wcs.celestial)
peak_vels_reproj.write(iram_co21_data_path("m33.co21_iram.masked.peakvel.hireprojection.fits", no_check=True))
# hi_mom1_reproj_arr = reproject_interp(hi_mom1, cube.wcs.celestial,
# shape_out=mom1.shape)[0]
# hi_mom1_reproj = Projection(hi_mom1_reproj_arr, unit=mom1.unit,
# wcs=cube.wcs.celestial)
# vels_co = hi_mom1_reproj
# vels_hi = Projection(hi_mom1.data, header=hi_mom1.header, unit=u.m / u.s)
vels_co = peak_vels
vels_hi = peak_vels_reproj
# vels_co = mom1
# vels_hi = mom1_reproj
# Loop through the clouds, making the total/average profiles.
num_clouds = int(cloud_mask_hdu.data.max())
cloud_total_specs_co = np.zeros((num_clouds, cube.shape[0]))
cloud_total_specs_hi = np.zeros((num_clouds, hi_cube.shape[0]))
cloud_avg_specs_co = np.zeros((num_clouds, cube.shape[0]))
cloud_avg_specs_hi = np.zeros((num_clouds, hi_cube.shape[0]))
# Plot for each cloud
verbose = False
# pool = Pool(6)
pool = None
for i in ProgressBar(num_clouds):
# Find spatial extent of the cloud
plane = (cloud_mask_hdu.data == i + 1).sum(0) > 0
xy_posns = np.where(plane)
co_spec = np.zeros((cube.shape[0],))
co_count = np.zeros((cube.shape[0],))
    # Shift with respect to the CO peak or centroid.
co_shifted = cube_shifter(cube, vels_co, gal.vsys, xy_posns=xy_posns,
pool=pool, return_spectra=True,)
for spec in co_shifted[0]:
finites = np.isfinite(spec)
co_spec[finites] += spec.value[finites]
co_count += finites
cloud_total_specs_co[i] = co_spec
cloud_avg_specs_co[i] = co_spec / co_count
# Now regrid plane onto the HI and do the same.
plane_reproj = reproject_interp((plane, WCS(cloud_mask_hdu.header).celestial),
hi_cube.wcs.celestial,
shape_out=hi_cube.shape[1:])[0] > 0
xy_posns_hi = np.where(plane_reproj)
hi_spec = np.zeros((hi_cube.shape[0],))
hi_count = np.zeros((hi_cube.shape[0],))
hi_shifted = cube_shifter(hi_cube, vels_hi, gal.vsys,
xy_posns=xy_posns_hi,
pool=pool, return_spectra=True,)
for spec in hi_shifted[0]:
# if np.nanmax(spec.value) < 0.0066:
# continue
finites = np.isfinite(spec) & (spec > 0)
hi_spec[finites] += spec.value[finites]
hi_count += finites
cloud_total_specs_hi[i] = (hi_spec * hi_beam.jtok(hi_freq).value)
cloud_avg_specs_hi[i] = (hi_spec * hi_beam.jtok(hi_freq).value) / hi_count
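    # hi_spec is accumulated in Jy/beam; multiplying by the average beam's Jy/beam-to-K
    # factor at the HI line frequency converts the stacked spectrum to brightness
    # temperature (K) before storing the total and per-channel average.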
if verbose:
onecolumn_figure()
sb.set_context("paper", rc={"font.family": "serif",
"figure.figsize": np.array([7.325, 5.9375])},
font_scale=1.5)
sb.set_palette("afmhot")
fig, ax = plt.subplots(3, 1,
sharex=True,
sharey=False, num=1)
plt.subplots_adjust(hspace=0.1,
wspace=0.1)
fig.text(0.5, 0.02, 'Velocity (km/s)', ha='center')
ax[0].plot(hi_cube.spectral_axis.to(u.km / u.s).value,
cloud_avg_specs_hi[i], 'b',
drawstyle='steps-mid', label='HI')
# For the legend
ax[0].plot([], [], 'r--', label="CO(2-1)")
ax2 = ax[0].twinx()
ax2.plot(cube.spectral_axis.to(u.km / u.s).value,
cloud_avg_specs_co[i] * 1000.,
'r--',
drawstyle='steps-mid', label='CO(2-1)')
align_yaxis(ax[0], 0, ax2, 0)
# ax[0].set_ylim([0, 1])
ax[0].set_xlim([gal.vsys.to(u.km / u.s).value - 40,
gal.vsys.to(u.km / u.s).value + 40])
ax[0].set_ylabel("HI Intensity (K)")
ax2.set_ylabel("CO Intensity (mK)")
ax[0].grid()
ax[0].legend(frameon=True, loc='upper right')
for spec in hi_shifted[0]:
if not np.isnan(spec).all():
ax[1].plot(hi_cube.spectral_axis.to(u.km / u.s).value,
spec.value * hi_beam.jtok(hi_freq).value, '--',
alpha=0.5, drawstyle='steps-mid')
ax[1].set_ylabel("HI Intensity (K)")
ax[1].grid()
for spec in co_shifted[0]:
if not np.isnan(spec).all():
ax[2].plot(cube.spectral_axis.to(u.km / u.s).value,
spec.to(u.mK).value, '--',
alpha=0.5, drawstyle='steps-mid')
ax[2].set_ylabel("CO Intensity (mK)")
ax[2].grid()
# plt.draw()
# raw_input("?")
# plt.clf()
fig.savefig(paper1_figures_path("GMC_CO_peakshift/GMC_CO_peakshift"
"_{}.png".format(i + 1)))
fig.savefig(paper1_figures_path("GMC_CO_peakshift/GMC_CO_peakshift"
"_{}.pdf".format(i + 1)))
plt.clf()
plt.close()
# Save the stacked spectra
np.savetxt("GMC_stackedspectra_peakvels_CO.txt", cloud_avg_specs_co)
np.savetxt("GMC_stackedspectra_peakvels_HI.txt", cloud_avg_specs_hi)
| mit |
quimaguirre/diana | scripts/compare_profiles.py | 1 | 35855 | import argparse
import configparser
import copy
import ntpath
import numpy as np
import pandas as pd
import time
import sys, os, re
from context import diana
import diana.classes.drug as diana_drug
import diana.classes.comparison as comparison
import diana.classes.network_analysis as network_analysis
import diana.classes.functional_analysis as functional_analysis
import diana.classes.top_scoring as top_scoring
import diana.toolbox.wrappers as wrappers
def main():
"""
Generate profiles for drugs using GUILD.
Optimized for Python 3.
python /home/quim/PHD/Projects/DIANA/diana/scripts/compare_profiles.py -d1 DB11699 -d2 DB00177 -sif /home/quim/PHD/Projects/DIANA/diana/data/network_cheng.sif
"""
options = parse_user_arguments()
compare_profiles(options)
def parse_user_arguments(*args, **kwds):
"""
Parses the arguments of the program
"""
parser = argparse.ArgumentParser(
description = "Compare the profiles of the input drugs",
epilog = "@oliva's lab 2017")
parser.add_argument('-j1','--job_id_drug1',dest='job_id_drug1',action = 'store',
help = """ Identifier of the drug number 1.
If the name of the drug has more than one word or special characters (parentheses, single quotes), introduce the name between
double quotes. """)
parser.add_argument('-j2','--job_id_drug2',dest='job_id_drug2',action = 'store',
help = """ Identifier of the drug number 2.
If the name of the drug has more than one word or special characters (parentheses, single quotes), introduce the name between
double quotes. """)
parser.add_argument('-sif','--sif_file',dest='sif',action = 'store',
help = 'Input file with a protein-protein interaction network in SIF format.')
parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',
help = """List of percentages that will be used as cut-offs to define the profiles of the drugs. It has to be a file containing:
- Different numbers that will be the threshold values separated by newline characters.
For example, a file called "top_threshold.list" containing:
0.1
0.5
1
5
10
""")
parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),
help = """Define the workspace directory where the data directory and the results directory will be created""")
options=parser.parse_args()
return options
#################
#################
# MAIN FUNCTION #
#################
#################
def compare_profiles(options):
"""
Compares the profiles of two input drugs
"""
# Start marker for time measure
start = time.time()
print("\n\t\t------------------------------------------------------------------------------------------------------------------------\n")
print("\t\tStarting Drug Interactions ANAlysis (DIANA), a program created by @OLIVA'S LAB. Second part: Comparison of drug profiles\n")
print("\t\t------------------------------------------------------------------------------------------------------------------------\n")
# Get the script path and define directories used
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
scripts_dir = os.path.join(main_path, 'scripts')
mappings_dir = os.path.join(main_path, 'mappings')
data_dir = os.path.join(main_path, 'data')
workspace_dir = options.workspace
create_directory(workspace_dir)
profiles_dir = os.path.join(workspace_dir, 'profiles')
# Create a directory for the results of the comparison
results_dir = os.path.join(workspace_dir, "comparisons")
create_directory(results_dir)
# Create directories for additional data
other_data_dir = os.path.join(workspace_dir, 'additional_data')
create_directory(other_data_dir)
random_networks_dir = os.path.join(other_data_dir, 'random_networks')
create_directory(random_networks_dir)
associations_dir = os.path.join(other_data_dir, 'gene_function_associations')
create_directory(associations_dir)
target_associations_dir = os.path.join(associations_dir, 'targets')
numbers_dir = os.path.join(other_data_dir, 'numbers')
create_directory(numbers_dir)
# Create a ComparisonResult instance
comparison_instance = comparison.ComparisonResult()
#--------------------------------------#
# GET INFORMATION FROM CONFIG FILE #
#--------------------------------------#
# Read the config file
config_file = os.path.join(main_path, 'config.ini')
config = configparser.ConfigParser()
config.read(config_file)
#--------------------#
# SIF CONTROLLER #
#--------------------#
# SIF CONTROLLER: Checks the network in SIF format provided by the user.
# Check if the network file is provided
if options.sif and fileExist(options.sif):
# Get the network name
network_filename = ntpath.basename(options.sif)
network_associations_dir = os.path.join(associations_dir, network_filename)
else:
# If not, we output an error
print(' DIANA INFO:\tThe network SIF file is missing. Please, introduce the parameter -sif.\n\t\tIf you do not have a network, use one of the networks in the sif folder.\n')
sys.exit(10)
#------------------------------#
# CREATE/READ NUMBERS FILE #
#------------------------------#
# Define parameters for the functional enrichment
type_functions = ['gobp', 'gomf', 'reactome']
type_corrections = ['fdr_bh', 'bonferroni']
# Numbers file associated to all targets
target_numbers_file = os.path.join(numbers_dir, 'target_numbers.txt')
if not fileExist(target_numbers_file):
with open(target_numbers_file, 'w') as num_fd:
num_fd.write('#feature\tnumber\n')
# Get targets
drugbank_geneid_mapping_file = os.path.join(mappings_dir, 'drugbank_geneid_drug_target_interactions.txt')
targets = diana_drug.get_all_targets_from_mappings(drugbank_geneid_mapping_file)
num_fd.write('target\t{}\n'.format(len(targets)))
# Get PFAMs
geneid_target_mapping_file = os.path.join(mappings_dir, 'geneid_target_mappings.txt')
pfams = diana_drug.get_all_pfams_from_mappings(geneid_target_mapping_file)
num_fd.write('pfam\t{}\n'.format(len(pfams)))
# Get functions
for type_function in type_functions:
associations_file = os.path.join(target_associations_dir, '{}_to_gene.txt'.format(type_function))
functions = functional_analysis.get_functions_from_associations_file(associations_file)
num_fd.write('{}\t{}\n'.format(type_function, len(functions)))
# Get ATCs
drugbank_atc_file = os.path.join(mappings_dir, 'drugbank_drug_atc.txt')
level_to_ATCs = diana_drug.get_all_atcs_from_mappings(drugbank_atc_file)
for level in ['level1', 'level2', 'level3', 'level4', 'level5']:
ATCs = set(level_to_ATCs[level])
num_fd.write('atc-{}\t{}\n'.format(level, len(ATCs)))
# Get SEs
drugbank_side_effects_file = os.path.join(mappings_dir, 'drugbank_drug_side_effects.txt')
ses = diana_drug.get_all_ses_from_mappings(drugbank_side_effects_file)
num_fd.write('se\t{}\n'.format(len(ses)))
target_numbers_df = pd.read_csv(target_numbers_file, sep='\t', index_col=None)
# Numbers file associated to network
network_numbers_file = os.path.join(numbers_dir, '{}_numbers.txt'.format(network_filename))
if not fileExist(network_numbers_file):
with open(network_numbers_file, 'w') as num_fd:
num_fd.write('#feature\tnumber\n')
# We create a Network instance
network_instance = network_analysis.Network(network_file=options.sif, type_id='geneid', network_format='sif')
# We keep the number of nodes
num_fd.write('node\t{}\n'.format(network_instance.network.number_of_nodes()))
num_fd.write('edge\t{}\n'.format(network_instance.network.number_of_edges()))
# Get functions
for type_function in type_functions:
associations_file = os.path.join(network_associations_dir, '{}_to_gene.txt'.format(type_function))
functions = functional_analysis.get_functions_from_associations_file(associations_file)
num_fd.write('{}\t{}\n'.format(type_function, len(functions)))
network_numbers_df = pd.read_csv(network_numbers_file, sep='\t', index_col=None)
#-------------------#
# READ PROFILES #
#-------------------#
print(' DIANA INFO:\tREADING PROFILES\n')
# Get the list of thresholds to create the profiles
if options.threshold_list and fileExist(options.threshold_list):
threshold_list = get_values_from_threshold_file(options.threshold_list)
else:
threshold_list = [1, 2, 5, 'functions']
print(' DIANA INFO:\tList of percentages used to define the drug profiles: {}\n'.format(', '.join([str(x) for x in threshold_list])))
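    # Numeric entries are interpreted as top percentages of the GUILD node scores; the
    # special entry 'functions' derives the cut-off from the functional-enrichment
    # sliding-window analysis further below instead of a fixed percentage.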
# Check if the directories of the drugs exist
if options.job_id_drug1:
drug_dir1 = os.path.join(profiles_dir, options.job_id_drug1)
check_directory(drug_dir1)
else:
print(' DIANA INFO:\tjob_id_drug1 parameter is missing. Please, introduce the parameter -j1 with the job identifier of the drug.\n')
sys.exit(10)
if options.job_id_drug2:
drug_dir2 = os.path.join(profiles_dir, options.job_id_drug2)
check_directory(drug_dir2)
else:
print(' DIANA INFO:\tjob_id_drug2 parameter is missing. Please, introduce the parameter -j2 with the job identifier of the drug.\n')
sys.exit(10)
# Read profiles for drug 1
drug_instance1, guild_profile_instance1, scored_network_instance1, target_function_results1, guild_results1 = read_drug_profiles(drug_dir=drug_dir1, mappings_dir=mappings_dir, target_associations_dir=target_associations_dir, network_associations_dir=network_associations_dir, threshold_list=threshold_list)
# Read profiles for drug 2
drug_instance2, guild_profile_instance2, scored_network_instance2, target_function_results2, guild_results2 = read_drug_profiles(drug_dir=drug_dir2, mappings_dir=mappings_dir, target_associations_dir=target_associations_dir, network_associations_dir=network_associations_dir, threshold_list=threshold_list)
#----------------------#
# COMPARE PROFILES #
#----------------------#
# Compare targets
targets_dict1 = comparison.generate_targets_dict_for_comparison(drug_instance1.targets)
targets_dict2 = comparison.generate_targets_dict_for_comparison(drug_instance2.targets)
num_targets = int(target_numbers_df[target_numbers_df['#feature'] == 'target']['number'])
summary_targets = comparison.calculate_comparison(targets_dict1, targets_dict2, num_targets)
comparison_instance.add_target_result('target', summary_targets)
print(summary_targets)
# Compare PFAMs
pfams_dict1 = comparison.generate_targets_dict_for_comparison(drug_instance1.pfams)
pfams_dict2 = comparison.generate_targets_dict_for_comparison(drug_instance2.pfams)
num_pfams = int(target_numbers_df[target_numbers_df['#feature'] == 'pfam']['number'])
summary_pfams = comparison.calculate_comparison(pfams_dict1, pfams_dict2, num_pfams)
comparison_instance.add_target_result('pfam', summary_pfams)
print(summary_pfams)
# Compare functional profiles
for type_function in type_functions:
num_target_functions = int(target_numbers_df[target_numbers_df['#feature'] == type_function]['number'])
for type_correction in type_corrections:
targets_functions_instance1 = target_function_results1['{}-{}'.format(type_function, type_correction)]
targets_functions_instance2 = target_function_results2['{}-{}'.format(type_function, type_correction)]
summary_target_functions = comparison.calculate_comparison(targets_functions_instance1.term_id_to_values, targets_functions_instance2.term_id_to_values, num_target_functions)
comparison_instance.add_target_result('{}-{}'.format(type_function, type_correction), summary_target_functions)
print('Target: {} - {}'.format(type_function, type_correction))
print(summary_target_functions)
# Compare GUILD profiles
num_nodes = int(network_numbers_df[network_numbers_df['#feature'] == 'node']['number'])
num_edges = int(network_numbers_df[network_numbers_df['#feature'] == 'edge']['number'])
for top_threshold in threshold_list:
if top_threshold == 'functions':
for type_function in type_functions:
num_network_functions = int(network_numbers_df[network_numbers_df['#feature'] == type_function]['number'])
for type_correction in type_corrections:
# Compare node profiles
print('NODE PROFILES, THRESHOLD: {} - {} - {}'.format(top_threshold, type_function, type_correction))
node_profile1_values = copy.copy(guild_results1['node-{}-{}-{}'.format(top_threshold, type_function, type_correction)].node_to_score)
node_profile2_values = copy.copy(guild_results2['node-{}-{}-{}'.format(top_threshold, type_function, type_correction)].node_to_score)
guild_profile1_values = comparison.generate_guild_dict_for_comparison(guild_profile_instance1.node_to_score)
guild_profile2_values = comparison.generate_guild_dict_for_comparison(guild_profile_instance2.node_to_score)
summary_nodes = comparison.calculate_comparison_top_scoring(node_profile1_values, guild_profile1_values, node_profile2_values, guild_profile2_values, num_nodes)
comparison_instance.add_guild_result('node', top_threshold, summary_nodes)
print(summary_nodes)
# Compare edge profiles
print('EDGE PROFILES, THRESHOLD: {} - {} - {}'.format(top_threshold, type_function, type_correction))
edge_profile1_values = comparison.generate_guild_dict_for_comparison(guild_results1['edge-{}-{}-{}'.format(top_threshold, type_function, type_correction)].edge_to_score)
edge_profile2_values = comparison.generate_guild_dict_for_comparison(guild_results2['edge-{}-{}-{}'.format(top_threshold, type_function, type_correction)].edge_to_score)
scored_network1_values = comparison.generate_guild_dict_for_comparison(scored_network_instance1.edge_to_score)
scored_network2_values = comparison.generate_guild_dict_for_comparison(scored_network_instance2.edge_to_score)
summary_edges = comparison.calculate_comparison_top_scoring(edge_profile1_values, scored_network1_values , edge_profile2_values, scored_network2_values, num_edges)
comparison_instance.add_guild_result('edge', top_threshold, summary_edges)
print(summary_edges)
# Compare functional profiles
print('FUNCTIONAL PROFILES, THRESHOLD: {} - {} - {}'.format(top_threshold, type_function, type_correction))
functional_profile1_values = copy.copy(guild_results1['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)].term_id_to_values)
functional_profile2_values = copy.copy(guild_results2['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)].term_id_to_values)
summary_functions = comparison.calculate_comparison(functional_profile1_values, functional_profile2_values, num_network_functions)
comparison_instance.add_guild_result('{}-{}'.format(type_function, type_correction), top_threshold, summary_functions)
print(summary_functions)
else:
# Compare node profiles
print('NODE PROFILES, THRESHOLD: {}'.format(top_threshold))
node_profile1_values = comparison.generate_guild_dict_for_comparison(guild_results1['node-{}'.format(top_threshold)].node_to_score)
node_profile2_values = comparison.generate_guild_dict_for_comparison(guild_results2['node-{}'.format(top_threshold)].node_to_score)
guild_profile1_values = comparison.generate_guild_dict_for_comparison(guild_profile_instance1.node_to_score)
guild_profile2_values = comparison.generate_guild_dict_for_comparison(guild_profile_instance2.node_to_score)
summary_nodes = comparison.calculate_comparison_top_scoring(node_profile1_values, guild_profile1_values, node_profile2_values, guild_profile2_values, num_nodes)
comparison_instance.add_guild_result('node', top_threshold, summary_nodes)
print(summary_nodes)
# Compare edge profiles
print('EDGE PROFILES, THRESHOLD: {}'.format(top_threshold))
edge_profile1_values = comparison.generate_guild_dict_for_comparison(guild_results1['edge-{}'.format(top_threshold)].edge_to_score)
edge_profile2_values = comparison.generate_guild_dict_for_comparison(guild_results2['edge-{}'.format(top_threshold)].edge_to_score)
scored_network1_values = comparison.generate_guild_dict_for_comparison(scored_network_instance1.edge_to_score)
scored_network2_values = comparison.generate_guild_dict_for_comparison(scored_network_instance2.edge_to_score)
summary_edges = comparison.calculate_comparison_top_scoring(edge_profile1_values, scored_network1_values , edge_profile2_values, scored_network2_values, num_edges)
comparison_instance.add_guild_result('edge', top_threshold, summary_edges)
print(summary_edges)
for type_function in type_functions:
num_network_functions = int(network_numbers_df[network_numbers_df['#feature'] == type_function]['number'])
for type_correction in type_corrections:
# Compare functional profiles
print('FUNCTIONAL PROFILES, THRESHOLD: {}'.format(top_threshold))
functional_profile1_values = copy.copy(guild_results1['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)].term_id_to_values)
functional_profile2_values = copy.copy(guild_results2['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)].term_id_to_values)
summary_functions = comparison.calculate_comparison(functional_profile1_values, functional_profile2_values, num_network_functions)
comparison_instance.add_guild_result('{}-{}'.format(type_function, type_correction), top_threshold, summary_functions)
print(summary_functions)
# Compare structures
similarity_results = []
if len(drug_instance1.smiles) > 0 and len(drug_instance2.smiles) > 0:
for smiles1 in drug_instance1.smiles:
for smiles2 in drug_instance2.smiles:
similarity_result = comparison.get_smiles_similarity_indigo(smiles1, smiles2, fp_type = "sub", metric = "tanimoto")
if similarity_result:
similarity_results.append(similarity_result)
if len(similarity_results) > 0:
similarity_results = np.mean(similarity_results)
comparison_instance.structure_result = similarity_results
print(' DIANA INFO:\tStructural similarity between the two drugs: {:.3f}\n'.format(similarity_results))
else:
similarity_results = None
print(' DIANA INFO:\tStructural similarity unavailable\n')
else:
similarity_results = None
print(' DIANA INFO:\tThe SMILES of the drugs are missing! It is not possible to compute the structural similarity\n')
# Compare ATC profiles
for level in ['level1', 'level2', 'level3', 'level4', 'level5']:
num_atcs = int(target_numbers_df[target_numbers_df['#feature'] == 'atc-{}'.format(level)]['number'])
ATCs1 = drug_instance1.level_to_ATCs[level]
ATCs2 = drug_instance2.level_to_ATCs[level]
ATCs1_dict = comparison.generate_targets_dict_for_comparison(ATCs1)
ATCs2_dict = comparison.generate_targets_dict_for_comparison(ATCs2)
summary_ATCs = comparison.calculate_comparison(ATCs1_dict, ATCs2_dict, num_atcs)
comparison_instance.atc_results[level] = summary_ATCs
print('ATC comparison: {}'.format(level))
print(summary_ATCs)
# Compare SE profiles
num_ses = int(target_numbers_df[target_numbers_df['#feature'] == 'se']['number'])
SEs1_dict = comparison.generate_targets_dict_for_comparison(drug_instance1.SEs)
SEs2_dict = comparison.generate_targets_dict_for_comparison(drug_instance2.SEs)
summary_SEs = comparison.calculate_comparison(SEs1_dict, SEs2_dict, num_ses)
comparison_instance.se_result = summary_SEs
print(summary_SEs)
# Calculate network proximity
network = wrappers.get_network(options.sif, only_lcc = True)
nodes_from = drug_instance1.targets_in_network
nodes_to = drug_instance2.targets_in_network
d, z, (mean, sd) = wrappers.calculate_proximity(network, nodes_from, nodes_to, min_bin_size = 2)
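    # d is the average shortest-path distance between the two drugs' target sets in the
    # network; z scores d against the (mean, sd) obtained from random, degree-matched
    # target sets (a negative z suggests the targets are closer than expected by chance).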
print (d, z, (mean, sd))
#-------------------#
# WRITE RESULTS #
#-------------------#
# Write the results table
comparison_id = '{}_vs_{}'.format(options.job_id_drug1, options.job_id_drug2)
results_table = os.path.join(results_dir, '{}.tsv'.format(comparison_id))
comparison_instance.output_results_table(results_table, threshold_list)
# End marker for time
end = time.time()
print('\n DIANA INFO:\tTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\n'.format(end - start, (end - start) / 60))
return
#######################
#######################
# SECONDARY FUNCTIONS #
#######################
#######################
def fileExist(file):
"""
Checks if a file exists AND is a file
"""
return os.path.exists(file) and os.path.isfile(file)
def create_directory(directory):
"""
Checks if a directory exists and if not, creates it
"""
try:
os.stat(directory)
except:
os.mkdir(directory)
return
def check_file(file):
"""
Checks if a file exists and if not, raises FileNotFound exception
"""
if not fileExist(file):
raise FileNotFound(file)
def check_directory(directory):
"""
Checks if a directory exists and if not, raises DirNotFound exception
"""
try:
os.stat(directory)
except:
raise DirNotFound(directory)
def get_targets_in_sif_file(sif_file, targets):
"""
Get the targets that are inside the network given by the user
"""
targets_in_network = set()
str_tar = [str(x) for x in targets]
with open(sif_file, 'r') as sif_fd:
for line in sif_fd:
node1, score, node2 = line.strip().split('\t')
if node1 in str_tar:
targets_in_network.add(node1)
if node2 in str_tar:
targets_in_network.add(node2)
return list(targets_in_network)
def read_parameters_file(parameters_file):
"""
Reads the parameters file of a drug profile
"""
with open(parameters_file, 'r') as parameters_fd:
header = parameters_fd.readline()
content = parameters_fd.readline()
fields = content.strip().split('\t')
return fields
def read_drug_profiles(drug_dir, mappings_dir, target_associations_dir, network_associations_dir, threshold_list=[1, 2, 5, 'functions']):
"""
Read the profiles of a drug.
"""
# Check/Read parameters file
output_parameters_file = os.path.join(drug_dir, 'parameters.txt')
check_file(output_parameters_file)
parameters = read_parameters_file(output_parameters_file)
drugname = parameters[1]
# Create drug instance
drug_instance = diana_drug.Drug(drugname)
# Read target profile
target_dir = os.path.join(drug_dir, 'target_profiles')
target_file = os.path.join(target_dir, '{}_targets.txt'.format(drugname))
check_file(target_file)
drug_instance.obtain_targets_from_file(target_file, target_type_id='geneid')
print(' DIANA INFO:\tTARGETS OF {}: {}\n'.format(drugname, ', '.join(drug_instance.targets)))
# Read PFAM profile
pfam_file = os.path.join(target_dir, 'pfam_profile.txt')
if fileExist(pfam_file):
drug_instance.obtain_pfams_from_file(pfam_file)
else:
# Obtain the PFAMs from a table
target_mapping_file = os.path.join(mappings_dir, 'geneid_target_mappings.txt')
drug_instance.obtain_pfams_from_geneid_target_table(drug_instance.targets, target_mapping_file)
# Read target-functional profiles
type_functions = ['gobp', 'gomf', 'reactome']
type_corrections = ['fdr_bh', 'bonferroni']
target_function_results = {}
for type_function in type_functions:
associations_file = os.path.join(target_associations_dir, '{}_to_gene.txt'.format(type_function))
for type_correction in type_corrections:
            targets_functional_file = os.path.join(target_dir, 'targets_functional_profile_{}_{}.txt'.format(type_function, type_correction))
            if fileExist(targets_functional_file):
                targets_functions_instance = network_analysis.FunctionalProfile(targets_functional_file, 'targets', 'targets')
            else:
                # Generate the functional profile file first, then read it back as an instance
                top_scoring.functional_top_scoring(top_geneids=drug_instance.targets, type_correction=type_correction, associations_file=associations_file, output_file=targets_functional_file)
                targets_functions_instance = network_analysis.FunctionalProfile(targets_functional_file, 'targets', 'targets')
            target_function_results['{}-{}'.format(type_function, type_correction)] = targets_functions_instance
# Read GUILD node scores
guild_dir = os.path.join(drug_dir, 'guild_profiles')
scores_file = os.path.join(guild_dir, 'output_scores.sif.netcombo')
check_file(scores_file)
guild_profile_instance = network_analysis.GUILDProfile(scores_file, type_id='geneid', top=100, top_type='percentage')
drug_instance.targets_in_network = set([target for target in drug_instance.targets if target in guild_profile_instance.node_to_score.keys()])
# Read GUILD edge scores
scored_network_file = os.path.join(guild_dir, 'network_scored.txt')
check_file(scored_network_file)
scored_network_instance = network_analysis.EdgeProfile(network_file=scored_network_file, type_id='geneid', network_format='sif', top=100)
# Read GUILD profiles
guild_results = {}
for top_threshold in threshold_list:
if top_threshold == 'functions':
# Read profiles associated to a functions threshold
for type_function in type_functions:
associations_file = os.path.join(network_associations_dir, '{}_to_gene.txt'.format(type_function))
for type_correction in type_corrections:
# Obtain cut-off
output_sliding_window_file = os.path.join(guild_dir, 'sliding_window_{}_{}.txt'.format(type_function, type_correction))
if fileExist(output_sliding_window_file):
cutoff_central_position, cutoff_right_interval = functional_analysis.read_sliding_window_file(output_sliding_window_file=output_sliding_window_file, num_seeds=len(drug_instance.targets_in_network))
else:
cutoff_central_position, cutoff_right_interval = functional_analysis.calculate_functions_threshold(seed_geneids=drug_instance.targets_in_network, geneid_to_score=guild_profile_instance.node_to_score, type_correction=type_correction, associations_file=associations_file, output_sliding_window_file=output_sliding_window_file, output_seeds_enrichment_file=None, seed_functional_enrichment=False)
print('{} - {} - {} - {}: Cut-off central position: {}. Cut-off right interval position: {}'.format(drug_instance.drug_name, top_threshold, type_function, type_correction, cutoff_central_position, cutoff_right_interval))
# Obtain node profile
node_file = os.path.join(guild_dir, 'node_profile_top_{}_{}_{}_{}.txt'.format('functions', type_function, type_correction, guild_profile_instance.type_id))
if fileExist(node_file):
node_profile_instance = network_analysis.GUILDProfile(scores_file=node_file, type_id=guild_profile_instance.type_id, top=cutoff_right_interval, top_type='number_of_nodes')
else:
node_profile_instance = guild_profile_instance.create_node_profile(threshold=cutoff_right_interval, threshold_type='number_of_nodes', output_file=node_file)
guild_results['node-{}-{}-{}'.format(top_threshold, type_function, type_correction)] = node_profile_instance
# Obtain edge profile
edge_file = os.path.join(guild_dir, 'edge_profile_top_{}_{}_{}_{}.txt'.format('functions', type_function, type_correction, guild_profile_instance.type_id))
if fileExist(edge_file):
edge_profile_instance = network_analysis.EdgeProfile(network_file=edge_file, type_id=guild_profile_instance.type_id, network_format='sif', top=cutoff_right_interval, top_type='number_of_nodes')
else:
edge_profile_instance = scored_network_instance.create_edge_profile(node_to_score=guild_profile_instance.node_to_score, threshold=cutoff_right_interval, threshold_type='number_of_nodes', output_file=edge_file)
guild_results['edge-{}-{}-{}'.format(top_threshold, type_function, type_correction)] = edge_profile_instance
# Obtain functional profile
function_file = os.path.join(guild_dir, 'functional_profile_top_{}_{}_{}.txt'.format('functions', type_function, type_correction))
if fileExist(function_file):
functional_profile_instance = network_analysis.FunctionalProfile(functional_file=function_file, top=cutoff_right_interval, node_file=node_file)
else:
functional_profile_instance = node_profile_instance.create_functional_profile(type_correction=type_correction, output_file=function_file, associations_file=associations_file)
guild_results['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)] = functional_profile_instance
else:
# Obtain node profile
node_file = os.path.join(guild_dir, 'node_profile_top_{}_{}.txt'.format(str(top_threshold), guild_profile_instance.type_id))
check_file(node_file)
node_profile_instance = network_analysis.GUILDProfile(node_file, type_id=guild_profile_instance.type_id, top=top_threshold, top_type='percentage')
guild_results['node-{}'.format(top_threshold)] = node_profile_instance
# Obtain edge profiles
edge_file = os.path.join(guild_dir, 'edge_profile_top_{}_{}.txt'.format(str(top_threshold), guild_profile_instance.type_id))
check_file(edge_file)
edge_profile_instance = network_analysis.EdgeProfile(network_file=edge_file, type_id=guild_profile_instance.type_id, network_format='sif', top=top_threshold, top_type='percentage')
guild_results['edge-{}'.format(top_threshold)] = edge_profile_instance
# Obtain functional profiles
for type_function in type_functions:
for type_correction in type_corrections:
functional_file = os.path.join(guild_dir, 'functional_profile_top_{}_{}_{}.txt'.format(str(top_threshold), type_function, type_correction))
if fileExist(functional_file):
functional_profile_instance = network_analysis.FunctionalProfile(functional_file=functional_file, top=top_threshold, node_file=node_file)
else:
functional_profile_instance = node_profile_instance.create_functional_profile(type_correction=type_correction, output_file=functional_file, associations_file=associations_file)
guild_results['functional-{}-{}-{}'.format(top_threshold, type_function, type_correction)] = functional_profile_instance
# Read structures
structure_file = os.path.join(drug_dir, 'structure_profiles/structure_profile.txt')
if fileExist(structure_file):
drug_instance.obtain_SMILES_from_file(structure_file)
else:
# Obtain SMILES from a table
drug_mapping_file = os.path.join(mappings_dir, 'drugbank_drug_mappings.txt')
drugbank_smiles_file = os.path.join(mappings_dir, 'drugbank_drug_smiles.txt')
# First, translate the drug input name to drugbankid (if necessary)
drugbankids = drug_instance.obtain_drugbankids_from_table(drug_mapping_file)
# Search SMILES in the table
drug_instance.obtain_SMILES_from_table(drugbankids, drugbank_smiles_file)
# Read ATCs
atc_file = os.path.join(drug_dir, 'atc_profiles/ATC_profile.txt')
if fileExist(atc_file):
drug_instance.obtain_ATCs_from_file(atc_file)
else:
# Obtain ATCs from a table
drug_mapping_file = os.path.join(mappings_dir, 'drugbank_drug_mappings.txt')
drugbank_atc_file = os.path.join(mappings_dir, 'drugbank_drug_atc.txt')
# First, translate the drug input name to drugbankid (if necessary)
drugbankids = drug_instance.obtain_drugbankids_from_table(drug_mapping_file)
# Search ATCs in the table
drug_instance.obtain_ATCs_from_table(drugbankids, drugbank_atc_file)
# Read side effects
se_file = os.path.join(drug_dir, 'se_profiles/SE_profile.txt')
if fileExist(se_file):
drug_instance.obtain_SE_from_file(se_file)
else:
# Obtain side effects from a table
drugbank_side_effects_file = os.path.join(mappings_dir, 'drugbank_drug_side_effects.txt')
# First, translate the drug input name to drugbankid (if necessary)
drugbankids = drug_instance.obtain_drugbankids_from_table(drug_mapping_file)
# Search side effects in the table
drug_instance.obtain_SE_from_table(drugbankids, drugbank_side_effects_file)
return drug_instance, guild_profile_instance, scored_network_instance, target_function_results, guild_results
class FileNotFound(Exception):
"""
Exception raised when a file is not found.
"""
def __init__(self, file):
self.file = file
def __str__(self):
return 'The file {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the profiles have been correctly generated.\n'.format(self.file)
class DirNotFound(Exception):
"""
Exception raised when a directory is not found.
"""
def __init__(self, directory):
self.directory = directory
def __str__(self):
return 'The directory {} has not been found.\nTherefore, the comparison cannot be performed. Please, check that all the parameters have been correctly introduced and the profiles have been correctly generated.\n'.format(self.directory)
if __name__ == "__main__":
main()
| mit |
hsiaoyi0504/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
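# In short: among all subsets of n_support observations, the MCD estimator picks the
# one whose empirical covariance matrix has the smallest determinant; the robust
# location and scatter are then the mean and covariance computed on that subset.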
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimates.
    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.
    det : float
        Log-determinant of the robust covariance estimate.
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        Squared Mahalanobis distances of all observations from the robust
        location estimate.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
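# A minimal usage sketch (illustrative only; the toy data and the choice of
# n_support below are assumptions, not part of this module):
#
#     rng = np.random.RandomState(0)
#     X = np.r_[rng.randn(90, 2), rng.randn(10, 2) + 7]  # 10 gross outliers
#     loc, cov, det, support, dist = c_step(X, n_support=55, random_state=rng)
#     # `support` flags the 55 observations whose covariance has minimal determinant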
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): fall back to keeping far fewer candidates.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is almost, but not
        exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1999]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
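        # Keep only observations whose squared Mahalanobis distance falls
        # below the 97.5% quantile of a chi2(n_features) distribution; the
        # remaining points are treated as outliers for the re-weighted
        # estimates.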
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
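# Illustrative usage sketch appended for clarity (not part of the original
# module): fit the robust estimator on synthetic data with a handful of
# injected outliers. The sample size and 10% contamination level are
# arbitrary assumptions made only for this demonstration.
if __name__ == "__main__":  # pragma: no cover
    rng = np.random.RandomState(42)
    X_demo = rng.multivariate_normal([0., 0.], [[1., .5], [.5, 1.]], size=200)
    X_demo[:20] += 8.  # assumed gross outliers
    mcd = MinCovDet(random_state=rng).fit(X_demo)
    print("robust location: %s" % mcd.location_)
    print("robust covariance:\n%s" % mcd.covariance_)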
| bsd-3-clause |
ashhher3/cvxpy | examples/expr_trees/inpainting.py | 12 | 3379 | from scipy import misc
import matplotlib.pyplot as plt
import numpy as np
l = misc.lena()
l = l.astype(np.float64, copy=False)
l = l/np.max(l) #rescale pixels into [0,1]
plt.imshow(l, cmap=plt.cm.gray)
#plt.show()
from PIL import Image, ImageDraw
num_lines = 5
width = 5
imshape = l.shape
def drawRandLine(draw,width):
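    # Note: relies on the module-level PIL image `im` created further below
    # to know the drawing canvas size.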
x = [np.random.randint(0,im.size[0]) for i in range(2)]
y = [np.random.randint(0,im.size[1]) for i in range(2)]
xy = zip(x,y)
#fill gives the color
draw.line(xy,fill=255,width=width)
im = Image.new("L",imshape)
draw = ImageDraw.Draw(im)
for i in range(num_lines):
drawRandLine(draw,width)
del draw
# im.show()
err = np.asarray(im,dtype=np.bool)
r = l.copy()
r[err] = 1.0
plt.imshow(r, cmap=plt.cm.gray)
import itertools
idx2pair = np.nonzero(err)
idx2pair = zip(idx2pair[0].tolist(), idx2pair[1].tolist())
pair2idx = dict(itertools.izip(idx2pair, xrange(len(idx2pair))))
idx2pair = np.array(idx2pair) #convert back to numpy array
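# idx2pair maps unknown-pixel index -> (row, col); pair2idx is its inverse.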
import scipy.sparse as sp
from cvxopt import spmatrix
def involvedpairs(pairs):
''' Get all the pixel pairs whose gradient involves an unknown pixel.
Input should be a set or dictionary of pixel pair tuples
'''
for pair in pairs: #loop through unknown pixels
yield pair
left = (pair[0],pair[1]-1)
if left[1] >= 0 and left not in pairs: #if 'left' in picture, and not already unknown
yield left
top = (pair[0]-1,pair[1])
topright = (pair[0]-1,pair[1]+1)
#if not on top boundary, top is fixed, and top not already touched by upper right pixel
if pair[0] > 0 and top not in pairs and topright not in pairs:
yield top
def formCOO(pair2idx, img):
m, n = img.shape
Is, Js, Vs, bs = [[],[]], [[],[]], [[],[]], [[],[]]
row = 0
for pixel1 in involvedpairs(pair2idx):
bottom = (pixel1[0]+1,pixel1[1])
right= (pixel1[0],pixel1[1]+1)
for i, pixel2 in enumerate([bottom, right]):
if pixel2[0] >= m or pixel2[1] >= n:
bs[i].append(0)
continue
b = 0
for j, pix in enumerate([pixel2, pixel1]):
if pix in pair2idx: #unknown pixel
Is[i].append(row)
Js[i].append(pair2idx[pix])
Vs[i].append(pow(-1,j))
else: #known pixel
b += pow(-1,j)*img[pix]
bs[i].append(b)
row += 1
'''
Form Gx and Gy such that the x-component of the gradient is Gx*x + bx,
where x is an array representing the unknown pixel values.
'''
m = len(bs[0])
n = len(pair2idx)
Gx = spmatrix(Vs[1], Is[1], Js[1],(m,n))
Gy = spmatrix(Vs[0], Is[0], Js[0],(m,n))
bx = np.array(bs[1])
by = np.array(bs[0])
return Gx, Gy, bx, by
Gx, Gy, bx, by = formCOO(pair2idx, r)
import cvxpy as cp
m, n = Gx.size
x = cp.Variable(n)
#z = cp.vstack((x.__rmul__(Gx) + bx).T, (x.__rmul__(Gy) + by).T)
#z = cp.hstack(x.__rmul__(Gx) + bx, x.__rmul__(Gy) + by)
z = cp.Variable(m, 2)
constraints = [z[:, 0] == x.__rmul__(Gx) + bx,
z[:, 1] == x.__rmul__(Gy) + by]
objective = cp.Minimize(sum([cp.norm(z[i,:]) for i in range(m)]))
p = cp.Problem(objective, constraints)
import cProfile
cProfile.run("""
result = p.solve(solver=cp.ECOS, verbose=True)
""")
| gpl-3.0 |
tmhm/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
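    # Illustrative extra check (not part of the original test): with the
    # default n_components=2 there should be one 2-D embedding point per
    # input sample.
    assert mds_clf.embedding_.shape == (4, 2)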
| bsd-3-clause |
e-koch/Phys-595 | project_code/Spec Fitting/plot_spec.py | 1 | 4420 |
'''
Figure of lines to fit for proposal
'''
import matplotlib.pyplot as p
from astropy.io import fits
from scipy.ndimage import gaussian_filter1d
lines = [r"H$\alpha$-6562$\AA$ \& NII-6583$\AA$", r"H$\beta$", r"H$\gamma", r"H$\delta$",
"Ca H & K", "MgII", "NaI", "OIIIa \& b"]
lambdas = [6562, 4861, 4340, 4103, 3950, 5175, 5894, 4959]
filename = "/Users/eric/../../Volumes/Mac_Storage/sdss_datamining/spec-0266-51602-0001.fits"
spec_file = fits.open(filename)
flux = spec_file[1].data["flux"]
smooth = gaussian_filter1d(flux, 2)
lam_wav = 10**spec_file[1].data["loglam"] / (1 + spec_file[2].data["Z"])
p.plot(lam_wav, smooth, 'b')
p.xlabel(r"Wavelength ($\AA$)")
p.ylabel(r"Flux ($10^{-17} erg/s/cm^2/\AA$)")
p.ylim(smooth.min(), smooth.max()+10)
p.xlim(lam_wav.min(), 6800)
for name, lam in zip(lines, lambdas):
p.axvline(x=lam, color='k', linestyle='--')
# p.annotate(name, xy=(lam, 60), xytext=(lam, 60))
p.annotate(r"H$\alpha$ - 6562$\AA$ \& NII - 6583$\AA$",
xy=(6562, 50), xytext=(6562, 50), rotation=90,
horizontalalignment='right',
verticalalignment='center')
p.annotate(r"H$\beta$ - 4861$\AA$",
xy=(4861, 110), xytext=(4861+5, 110), rotation=90,
horizontalalignment='right',
verticalalignment='center')
p.annotate(r"H$\gamma$ - 4340$\AA$",
xy=(4340, 110), xytext=(4340+20, 110), rotation=90,
horizontalalignment='left',
verticalalignment='center')
p.annotate(r"H$\delta$ - 4103$\AA$",
xy=(4103, 90), xytext=(4103+20, 90), rotation=90,
horizontalalignment='left',
verticalalignment='center')
p.annotate(r"Ca H \& K - 3934, 3969$\AA$",
xy=(3950, 90), xytext=(3950, 90), rotation=90,
horizontalalignment='right',
verticalalignment='center')
p.annotate(r"MgII - 5175$\AA$",
xy=(5175, 110), xytext=(5175+20, 110), rotation=90,
horizontalalignment='left',
verticalalignment='center')
p.annotate(r"NaI - 5894$\AA$",
xy=(5894, 60), xytext=(5894, 60), rotation=90,
horizontalalignment='right',
verticalalignment='center')
p.annotate(r"[OIII] - 4959, 5007$\AA$",
xy=(4959, 50), xytext=(4959+20, 45), rotation=90,
horizontalalignment='left',
verticalalignment='center')
p.show()
p.close()
# Two
filename = "/Users/eric/../../Volumes/Mac_Storage/sdss_datamining/spec-0273-51957-0136.fits"
spec_file = fits.open(filename)
flux = spec_file[1].data["flux"]
smooth = gaussian_filter1d(flux, 2)
lam_wav = 10**spec_file[1].data["loglam"] / (1 + spec_file[2].data["Z"])
p.plot(lam_wav, smooth, 'b')
p.xlabel(r"Wavelength ($\AA$)")
p.ylabel(r"Flux ($10^{-17} erg/s/cm^2/\AA$)")
p.ylim(smooth.min(), smooth.max()+5)
p.xlim(lam_wav.min(), 6800)
for name, lam in zip(lines, lambdas):
p.axvline(x=lam, color='k', linestyle='--')
# p.annotate(name, xy=(lam, 60), xytext=(lam, 60))
p.annotate(r"H$\alpha$ - 6562$\AA$ \& NII - 6583$\AA$",
xy=(6562, 17), xytext=(6562-35, 15), rotation=90,
horizontalalignment='right',
verticalalignment='center')
p.annotate(r"H$\beta$ - 4861$\AA$",
xy=(4861, 15), xytext=(4861+5, 15), rotation=90,
horizontalalignment='right',
verticalalignment='center')
p.annotate(r"H$\gamma$ - 4340$\AA$",
xy=(4340, 15), xytext=(4340+20, 15), rotation=90,
horizontalalignment='left',
verticalalignment='center')
p.annotate(r"H$\delta$ - 4103$\AA$",
xy=(4103, 15), xytext=(4103+20, 15), rotation=90,
horizontalalignment='left',
verticalalignment='center')
p.annotate(r"Ca H \& K - 3934, 3969$\AA$",
xy=(3950, 15), xytext=(3950, 15), rotation=90,
horizontalalignment='right',
verticalalignment='center')
p.annotate(r"MgII - 5175$\AA$",
xy=(5175, 15), xytext=(5175+20, 15), rotation=90,
horizontalalignment='left',
verticalalignment='center')
p.annotate(r"NaI - 5894$\AA$",
xy=(5894, 15), xytext=(5894, 15), rotation=90,
horizontalalignment='right',
verticalalignment='center')
p.annotate(r"[OIII] - 4959, 5007$\AA$",
xy=(4959, 15), xytext=(4959+20, 15), rotation=90,
horizontalalignment='left',
verticalalignment='center')
p.show()
| mit |
blackecho/Deep-Learning-TensorFlow | yadlt/models/autoencoders/denoising_autoencoder.py | 2 | 9565 | """Implementation of Denoising Autoencoder using TensorFlow."""
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from yadlt.core import Layers, Loss, Trainer
from yadlt.core import UnsupervisedModel
from yadlt.utils import tf_utils, utilities
class DenoisingAutoencoder(UnsupervisedModel):
"""Implementation of Denoising Autoencoders using TensorFlow.
The interface of the class is sklearn-like.
"""
def __init__(
self, n_components, name='dae', loss_func='mse',
enc_act_func=tf.nn.tanh, dec_act_func=None, num_epochs=10,
batch_size=10, opt='sgd', learning_rate=0.01, momentum=0.9,
corr_type='none', corr_frac=0., regtype='none', regcoef=5e-4):
"""Constructor.
Parameters
----------
n_components : int
Number of hidden units.
name : str, optional (default = "dae")
Model name (used for save/load from disk).
loss_func : str, optional (default = "mse")
Loss function. ['mse', 'cross_entropy']
enc_act_func : tf.nn.[activation]
Activation function for the encoder.
dec_act_func : tf.nn.[activation]
Activation function for the decoder.
num_epochs : int, optional (default = 10)
Number of epochs.
batch_size : int, optional (default = 10)
Size of each mini-batch.
opt : str, optional (default = "sgd")
Which tensorflow optimizer to use.
Possible values: ['sgd', 'momentum', 'adagrad', 'adam']
learning_rate : float, optional (default = 0.01)
Initial learning rate.
momentum : float, optional (default = 0.9)
Momentum parameter (only used if opt = "momentum").
corr_type : str, optional (default = "none")
Type of input corruption.
Can be one of: ["none", "masking", "salt_and_pepper"]
corr_frac : float, optional (default = 0.0)
Fraction of the input to corrupt.
regtype : str, optional (default = "none")
Type of regularization to apply.
Can be one of: ["none", "l1", "l2"].
regcoef : float, optional (default = 5e-4)
Regularization parameter. If 0, no regularization.
Only considered if regtype != "none".
"""
UnsupervisedModel.__init__(self, name)
self.loss_func = loss_func
self.learning_rate = learning_rate
self.opt = opt
self.num_epochs = num_epochs
self.batch_size = batch_size
self.momentum = momentum
self.regtype = regtype
self.regcoef = regcoef
self.loss = Loss(self.loss_func)
self.trainer = Trainer(
opt, learning_rate=learning_rate, momentum=momentum)
self.n_components = n_components
self.enc_act_func = enc_act_func
self.dec_act_func = dec_act_func
self.corr_type = corr_type
self.corr_frac = corr_frac
self.input_data_orig = None
self.input_data = None
self.W_ = None
self.bh_ = None
self.bv_ = None
def _train_model(self, train_X, train_Y=None, val_X=None, val_Y=None):
"""Train the model.
Parameters
----------
train_X : array_like
Training data, shape (num_samples, num_features).
train_Y : array_like, optional (default = None)
Reference training data, shape (num_samples, num_features).
val_X : array_like, optional, default None
Validation data, shape (num_val_samples, num_features).
val_Y : array_like, optional, default None
Reference validation data, shape (num_val_samples, num_features).
Returns
-------
self : trained model instance
"""
pbar = tqdm(range(self.num_epochs))
for i in pbar:
self._run_train_step(train_X)
if val_X is not None:
feed = {self.input_data_orig: val_X,
self.input_data: val_X}
err = tf_utils.run_summaries(
self.tf_session, self.tf_merged_summaries,
self.tf_summary_writer, i, feed, self.cost)
pbar.set_description("Reconstruction loss: %s" % (err))
return self
def _run_train_step(self, train_X):
"""Run a training step.
A training step is made by randomly corrupting the training set,
randomly shuffling it, divide it into batches and run the optimizer
for each batch.
Parameters
----------
train_X : array_like
Training data, shape (num_samples, num_features).
Returns
-------
self
"""
x_corrupted = utilities.corrupt_input(
train_X, self.tf_session, self.corr_type, self.corr_frac)
shuff = list(zip(train_X, x_corrupted))
np.random.shuffle(shuff)
batches = [_ for _ in utilities.gen_batches(shuff, self.batch_size)]
for batch in batches:
x_batch, x_corr_batch = zip(*batch)
tr_feed = {self.input_data_orig: x_batch,
self.input_data: x_corr_batch}
self.tf_session.run(self.train_step, feed_dict=tr_feed)
def build_model(self, n_features, W_=None, bh_=None, bv_=None):
"""Create the computational graph.
Parameters
----------
n_features : int
Number of units in the input layer.
W_ : array_like, optional (default = None)
Weight matrix np array.
bh_ : array_like, optional (default = None)
Hidden bias np array.
bv_ : array_like, optional (default = None)
Visible bias np array.
Returns
-------
self
"""
self._create_placeholders(n_features)
self._create_variables(n_features, W_, bh_, bv_)
self._create_encode_layer()
self._create_decode_layer()
variables = [self.W_, self.bh_, self.bv_]
regterm = Layers.regularization(variables, self.regtype, self.regcoef)
self.cost = self.loss.compile(
self.reconstruction, self.input_data_orig, regterm=regterm)
self.train_step = self.trainer.compile(self.cost)
def _create_placeholders(self, n_features):
"""Create the TensorFlow placeholders for the model.
:return: self
"""
self.input_data_orig = tf.placeholder(
tf.float32, [None, n_features], name='x-input')
self.input_data = tf.placeholder(
tf.float32, [None, n_features], name='x-corr-input')
# not used in this model, created just to comply
# with unsupervised_model.py
self.input_labels = tf.placeholder(tf.float32)
self.keep_prob = tf.placeholder(tf.float32, name='keep-probs')
def _create_variables(self, n_features, W_=None, bh_=None, bv_=None):
"""Create the TensorFlow variables for the model.
:return: self
"""
if W_:
self.W_ = tf.Variable(W_, name='enc-w')
else:
self.W_ = tf.Variable(
tf.truncated_normal(
shape=[n_features, self.n_components], stddev=0.1),
name='enc-w')
if bh_:
self.bh_ = tf.Variable(bh_, name='hidden-bias')
else:
self.bh_ = tf.Variable(tf.constant(
0.1, shape=[self.n_components]), name='hidden-bias')
if bv_:
self.bv_ = tf.Variable(bv_, name='visible-bias')
else:
self.bv_ = tf.Variable(tf.constant(
0.1, shape=[n_features]), name='visible-bias')
def _create_encode_layer(self):
"""Create the encoding layer of the network.
Returns
-------
self
"""
with tf.name_scope("encoder"):
activation = tf.add(
tf.matmul(self.input_data, self.W_),
self.bh_
)
if self.enc_act_func:
self.encode = self.enc_act_func(activation)
else:
self.encode = activation
return self
def _create_decode_layer(self):
"""Create the decoding layer of the network.
Returns
-------
self
"""
with tf.name_scope("decoder"):
activation = tf.add(
tf.matmul(self.encode, tf.transpose(self.W_)),
self.bv_
)
if self.dec_act_func:
self.reconstruction = self.dec_act_func(activation)
else:
self.reconstruction = activation
return self
def get_parameters(self, graph=None):
"""Return the model parameters in the form of numpy arrays.
Parameters
----------
graph : tf.Graph, optional (default = None)
Tensorflow graph object.
Returns
-------
dict : model parameters dictionary.
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
return {
'enc_w': self.W_.eval(),
'enc_b': self.bh_.eval(),
'dec_b': self.bv_.eval()
}
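# Illustrative usage sketch (not part of the original module). It assumes the
# sklearn-like `fit`/`transform` methods inherited from UnsupervisedModel
# accept plain (num_samples, num_features) arrays; check the yadlt core API
# before relying on this exact signature.
if __name__ == '__main__':  # pragma: no cover
    X_train = np.random.rand(500, 64)  # arbitrary synthetic data
    dae = DenoisingAutoencoder(n_components=32, num_epochs=5,
                               corr_type='masking', corr_frac=0.3)
    dae.fit(X_train)          # assumed public API from UnsupervisedModel
    encoded = dae.transform(X_train)
    print('encoded shape:', encoded.shape)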
| mit |
walterreade/scikit-learn | examples/cluster/plot_cluster_comparison.py | 58 | 4681 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
codester2/devide | modules/viewers/CodeRunner.py | 7 | 8182 | # check python24/lib/code.py - exceptions raised only result in
# printouts. perhaps we want a real exception?
import code # deep magic
import md5
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin, WindowRenameMixin
import module_utils
import sys
import module_kits.wx_kit
from module_kits.wx_kit.python_shell_mixin import PythonShellMixin
import wx
NUMBER_OF_INPUTS = 5
NUMBER_OF_OUTPUTS = 5
EDITWINDOW_LABELS = ['Scratch', 'Setup', 'Execute']
class CodeRunner(IntrospectModuleMixin, ModuleBase, PythonShellMixin,
WindowRenameMixin):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
self.inputs = [None] * NUMBER_OF_INPUTS
self.outputs = [None] * NUMBER_OF_OUTPUTS
self._config.scratch_src = self._config.setup_src = \
self._config.execute_src = ''
self._config_srcs = ['scratch_src',
'setup_src',
'execute_src']
# these are the real deals, i.e. the underlying logic
self._src_scratch = self._src_setup = self._src_execute = ''
self._srcs = ['_src_scratch', '_src_setup', '_src_execute']
# we use this to determine whether the current setup src has been
# executed or not
self._md5_setup_src = ''
self._create_view_frame()
PythonShellMixin.__init__(self, self._view_frame.shell_window,
module_manager)
module_utils.create_eoca_buttons(self, self._view_frame,
self._view_frame.view_frame_panel,
ok_default=False,
cancel_hotkey=False)
# more convenience bindings
self._editwindows = [self._view_frame.scratch_editwindow,
self._view_frame.setup_editwindow,
self._view_frame.execute_editwindow]
self.interp = self._view_frame.shell_window.interp
# set interpreter on all three our editwindows
for ew in self._editwindows:
ew.set_interp(self.interp)
self._bind_events()
self.interp.locals.update(
{'obj' : self})
# initialise macro packages
self.support_vtk(self.interp)
self.support_matplotlib(self.interp)
self.config_to_logic()
self.logic_to_config()
self.config_to_view()
self.view_initialised = True
self.view()
def close(self):
# parameter is exception_printer method
PythonShellMixin.close(self,
self._module_manager.log_error_with_exception)
for i in range(len(self.get_input_descriptions())):
self.set_input(i, None)
self._view_frame.Destroy()
del self._view_frame
ModuleBase.close(self)
def get_input_descriptions(self):
return ('Any input',) * NUMBER_OF_INPUTS
def get_output_descriptions(self):
return ('Dynamic output',) * NUMBER_OF_OUTPUTS
def set_input(self, idx, input_stream):
self.inputs[idx] = input_stream
def get_output(self, idx):
return self.outputs[idx]
def view_to_config(self):
for ew, cn, i in zip(self._editwindows, self._config_srcs,
range(len(self._editwindows))):
setattr(self._config, cn, ew.GetText())
self.set_editwindow_modified(i, False)
def config_to_view(self):
for ew, cn, i in zip(self._editwindows, self._config_srcs,
range(len(self._editwindows))):
ew.SetText(getattr(self._config, cn))
self.set_editwindow_modified(i, False)
def config_to_logic(self):
logic_changed = False
for cn,ln in zip(self._config_srcs, self._srcs):
c = getattr(self._config, cn)
l = getattr(self, ln)
if c != l:
setattr(self, ln, c)
logic_changed = True
return logic_changed
def logic_to_config(self):
config_changed = False
for cn,ln in zip(self._config_srcs, self._srcs):
c = getattr(self._config, cn)
l = getattr(self, ln)
if l != c:
setattr(self._config, cn, l)
config_changed = True
return config_changed
def execute_module(self):
# we only attempt running setup_src if its md5 is different from
# that of the previous setup_src that we attempted to run
hd = md5.md5(self._src_setup).hexdigest()
if hd != self._md5_setup_src:
self._md5_setup_src = hd
self._run_source(self._src_setup, raise_exceptions=True)
self._run_source(self._src_execute, raise_exceptions=True)
def view(self):
self._view_frame.Show()
self._view_frame.Raise()
def _bind_events(self):
wx.EVT_MENU(self._view_frame, self._view_frame.file_open_id,
self._handler_file_open)
wx.EVT_MENU(self._view_frame, self._view_frame.file_save_id,
self._handler_file_save)
wx.EVT_MENU(self._view_frame, self._view_frame.run_id,
self._handler_run)
for i in range(len(self._editwindows)):
def observer_modified(ew, i=i):
self.set_editwindow_modified(i, True)
self._editwindows[i].observer_modified = observer_modified
def _create_view_frame(self):
import resources.python.code_runner_frame
reload(resources.python.code_runner_frame)
self._view_frame = module_utils.instantiate_module_view_frame(
self, self._module_manager,
resources.python.code_runner_frame.\
CodeRunnerFrame)
self._view_frame.main_splitter.SetMinimumPaneSize(50)
# tried both self._view_frame.shell_window setFocus /
# SetFocus. On Ubuntu 8.04, wxPython 2.8.7.1 this doesn't
# seem to work.
def _handler_file_open(self, evt):
try:
filename, t = self._open_python_file(self._view_frame)
except IOError, e:
self._module_manager.log_error_with_exception(
'Could not open file %s into CodeRunner edit: %s' %
(filename, str(e)))
else:
if filename is not None:
cew = self._get_current_editwindow()
cew.SetText(t)
self._view_frame.statusbar.SetStatusText(
'Loaded %s into current edit.' % (filename,))
def _handler_file_save(self, evt):
try:
cew = self._get_current_editwindow()
filename = self._saveas_python_file(cew.GetText(),
self._view_frame)
if filename is not None:
self._view_frame.statusbar.SetStatusText(
'Saved current edit to %s.' % (filename,))
except IOError, e:
self._module_manager.log_error_with_exception(
'Could not save CodeRunner edit to file %s: %s' %
(filename, str(e)))
def _handler_run(self, evt):
self.run_current_edit()
def _get_current_editwindow(self):
sel = self._view_frame.edit_notebook.GetSelection()
return [self._view_frame.scratch_editwindow,
self._view_frame.setup_editwindow,
self._view_frame.execute_editwindow][sel]
def run_current_edit(self):
cew = self._get_current_editwindow()
text = cew.GetText()
self._run_source(text)
self._view_frame.statusbar.SetStatusText(
'Current edit run completed.')
def set_editwindow_modified(self, idx, modified):
pt = EDITWINDOW_LABELS[idx]
if modified:
pt += ' *'
self._view_frame.edit_notebook.SetPageText(idx, pt)
| bsd-3-clause |
kashif/scikit-learn | sklearn/preprocessing/tests/test_label.py | 12 | 17807 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
mwv/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
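# Illustrative sketch (not part of the scikit-learn example itself): a minimal,
# self-contained check of what ``make_classification`` returns. The sample count
# of 100 is an assumed value chosen only for illustration; the helper is defined
# but never called by this script.
def _sketch_make_classification_shapes():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=100, n_features=2, n_redundant=0,
                               n_informative=2, n_clusters_per_class=1)
    # X is a (100, 2) feature matrix, y a (100,) vector of class labels
    return X.shape, y.shape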
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
sanja7s/EEDC | src/timelines/all_nodes_plug_timeline.py | 1 | 7788 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
author: sanja7s
---------------
plot per-node timelines of plug (power), number of jobs, CPU, DRAM and r/b values
"""
import os
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from collections import defaultdict
from matplotlib import colors
from pylab import MaxNLocator
import pylab as pl
from mpl_toolkits.axes_grid import inset_locator
matplotlib.style.use('ggplot')
IN_DIR = "../../data/timelines/all_TIMELINES"
os.chdir(IN_DIR)
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
grid = {'color' : 'gray',
'alpha' : 0.5,
'linestyle' : '-.'}
lines = {'color' : 'gray'}
#xticks = {'color' : 'gray'}
matplotlib.rc('font', **font)
matplotlib.rc('grid', **grid)
matplotlib.rc('lines', **lines)
#matplotlib.rc('ticks', **ticks)
def read_in_all_node_variables():
f_in = 'nodedata.csv'
distr_plug = defaultdict(float)
distr_num_jobs = defaultdict(int)
distr_CPU = defaultdict(tuple)
distr_MEM = defaultdict(tuple)
distr_rb = defaultdict(tuple)
node_distr = defaultdict(list)
i = 0
TESTi = 10000
with open(f_in, 'r') as f:
for line in f:
n, t, n, node, n, r, n, b, n, swpd, n, free, n,\
buff, n, cache, n, si, n, so, n, bi, n, bo, n,\
in1, n, cs, n, us, n, sy, n, id7s, n, wa, n,\
st, n, cpu1, n, dram1, n, cpu2, n, dram2, n, p, n,\
jobs_list, n, tp, n = line.split('"')
if node not in node_distr:
node_distr[node] = defaultdict(list)
distr_plug[node] = defaultdict(float)
distr_num_jobs[node] = defaultdict(int)
distr_CPU[node] = defaultdict(tuple)
distr_MEM[node] = defaultdict(tuple)
distr_rb[node] = defaultdict(tuple)
t = dt.datetime.fromtimestamp(int(t))
jobs = jobs_list.split(',')
"""
i += 1
if i == TESTi:
return node_distr
"""
distr_plug[node][t] = float(p)
if jobs_list == "":
distr_num_jobs[node][t] = 0
else:
distr_num_jobs[node][t] = len(jobs)
distr_CPU[node][t] = (float(cpu1), float(cpu2))
distr_MEM[node][t] = (float(dram1), float(dram2))
distr_rb[node][t] = (int(r), int(b))
node_distr[node] = [distr_plug[node], distr_num_jobs[node], distr_CPU[node], \
distr_MEM[node], distr_rb[node]]
return node_distr
def plot_plug_timeline(node, distr_plug):
print 'Plotting plug values'
d = distr_plug
dates = d.keys()
X = pd.to_datetime(dates)
values = [v if v > 0 else 0 for v in d.values()]
ts = pd.Series(values, index = X)
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print min(values), max(values)
fig, ax = plt.subplots()
ts.plot(color = 'darkblue')
for tl in ax.get_yticklabels():
tl.set_color('darkblue')
fig.autofmt_xdate()
ax.set_xlabel('time')
ax.set_ylabel('plug value', color='darkblue')
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
ymin = 240
ymax = 280
if min(values) < 160:
ymin = min(values) - 10
if max(values) > 250:
ymax = max(values) + 10
plt.ylim(ymin, ymax)
plt.savefig('plug/plug_timeline_node_' + node + '.png')
return fig, ax, plt
def plot_plug_and_CPUs_timeline(node, d, distr_plug):
dates = d.keys()
X = pd.to_datetime(dates)
values1 = []
values2 = []
for el in d.values():
if el[0] > 0:
v1 = el[0]
else:
v1 = 0
values1.append(v1)
if el[1] > 0:
v2 = el[1]
else:
v2 = 0
values2.append(v2)
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print 'Min and max CPU1 ', min(values1), max(values1)
print 'Min and max CPU2 ', min(values2), max(values2)
fig, ax1, plt = plot_plug_timeline(node, distr_plug)
ax2 = ax1.twinx()
ts1 = pd.Series(values1, index = X)
ax2.scatter(X, values1, marker='s', color='red', s=4, label = 'CPU1')
#ts1.plot(color='red', label = 'CPU1')
ts2 = pd.Series(values2, index = X)
ax2.scatter(X, values2, marker='s', color='magenta', s=4, label = 'CPU2')
#ts2.plot(color='magenta', label = 'CPU2')
ax2.set_ylabel('CPU values', color='red')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('r')
handles, labels = ax2.get_legend_handles_labels()
l = ax2.legend(handles, labels, loc=3)
for text in l.get_texts():
text.set_color('gray')
plt.savefig('CPU_plug/CPUs_plug_timeline_node_' + node + '.png')
def plot_plug_and_MEM_timeline(node, d, distr_plug):
dates = d.keys()
X = pd.to_datetime(dates)
values1 = [v[0] if v[0] > -1 else -1 for v in d.values()]
values2 = [v[1] if v[1] > -1 else -1 for v in d.values()]
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print 'Min and max MEM1 ', min(values1), max(values1)
print 'Min and max MEM2 ', min(values2), max(values2)
fig, ax1, plt = plot_plug_timeline(node, distr_plug)
ax2 = ax1.twinx()
ax2.scatter(X, values1,
marker='s', color='darkgreen', s=4, label = 'DRAM1')
ax2.scatter(X, values2,
marker='s', color='olive', s=4, label = 'DRAM2')
ax2.set_ylabel('DRAM values', color='olive')
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('olive')
handles, labels = ax2.get_legend_handles_labels()
l = ax2.legend(handles, labels, loc=1)
for text in l.get_texts():
text.set_color('gray')
plt.savefig('MEM/MEM_plug_timeline_node_' + node + '.png')
def plot_plug_and_rb_timeline(node, d, distr_plug):
dates = d.keys()
X = pd.to_datetime(dates)
values1 = [v[0] if v[0] > 0 else 0 for v in d.values()]
values2 = [v[1] if v[1] > 0 else 0 for v in d.values()]
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print 'Min and max MEM1 ', min(values1), max(values1)
print 'Min and max MEM2 ', min(values2), max(values2)
fig, ax1, plt = plot_plug_timeline(node, distr_plug)
ax2 = ax1.twinx()
ax2.scatter(X, values1,
marker='s', color='tomato', s=3, label = 'r')
ax2.scatter(X, values2,
marker='s', color='sage', s=3, label = 'b')
ax2.set_ylabel('r and b values', color='sage')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('sage')
handles, labels = ax2.get_legend_handles_labels()
l = ax2.legend(handles, labels, loc=1)
for text in l.get_texts():
text.set_color('gray')
plt.savefig('rb/rb_plug_timeline_node_' + node + '.png')
def plot_plug_and_num_jobs_timeline(node, distr_num_jobs, distr_plug):
d = distr_num_jobs
dates = d.keys()
X = pd.to_datetime(dates)
values = d.values()
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print min(values), max(values)
fig, ax1, plt = plot_plug_timeline(node, distr_plug)
ax2 = ax1.twinx()
ax2.scatter(X, values,
marker='s', color='red', s=7)
ax2.set_ylabel('# of jobs', color='red')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('r')
plt.savefig('n_jobs_plug/num_jobs_and_plug_timeline_node_' + node + '.png')
def plot_all():
node_variables = read_in_all_node_variables()
for node in node_variables:
distr_plug = node_variables[node][0]
distr_num_jobs = node_variables[node][1]
distr_CPU = node_variables[node][2]
distr_MEM = node_variables[node][3]
distr_rb = node_variables[node][4]
plot_plug_timeline(node, distr_plug)
plot_plug_and_num_jobs_timeline(node, distr_num_jobs, distr_plug)
plot_plug_and_rb_timeline(node, distr_rb, distr_plug)
plot_plug_and_CPUs_timeline(node, distr_CPU, distr_plug)
plot_plug_and_MEM_timeline(node, distr_MEM, distr_plug)
plot_all()
| apache-2.0 |
pratapvardhan/pandas | asv_bench/benchmarks/multiindex_object.py | 3 | 3894 | import string
import numpy as np
import pandas.util.testing as tm
from pandas import date_range, MultiIndex
from .pandas_vb_common import setup # noqa
class GetLoc(object):
goal_time = 0.2
def setup(self):
self.mi_large = MultiIndex.from_product(
[np.arange(1000), np.arange(20), list(string.ascii_letters)],
names=['one', 'two', 'three'])
self.mi_med = MultiIndex.from_product(
[np.arange(1000), np.arange(10), list('A')],
names=['one', 'two', 'three'])
self.mi_small = MultiIndex.from_product(
[np.arange(100), list('A'), list('A')],
names=['one', 'two', 'three'])
def time_large_get_loc(self):
self.mi_large.get_loc((999, 19, 'Z'))
def time_large_get_loc_warm(self):
for _ in range(1000):
self.mi_large.get_loc((999, 19, 'Z'))
def time_med_get_loc(self):
self.mi_med.get_loc((999, 9, 'A'))
def time_med_get_loc_warm(self):
for _ in range(1000):
self.mi_med.get_loc((999, 9, 'A'))
def time_string_get_loc(self):
self.mi_small.get_loc((99, 'A', 'A'))
def time_small_get_loc_warm(self):
for _ in range(1000):
self.mi_small.get_loc((99, 'A', 'A'))
class Duplicates(object):
goal_time = 0.2
def setup(self):
size = 65536
arrays = [np.random.randint(0, 8192, size),
np.random.randint(0, 1024, size)]
mask = np.random.rand(size) < 0.1
self.mi_unused_levels = MultiIndex.from_arrays(arrays)
self.mi_unused_levels = self.mi_unused_levels[mask]
def time_remove_unused_levels(self):
self.mi_unused_levels.remove_unused_levels()
class Integer(object):
goal_time = 0.2
def setup(self):
self.mi_int = MultiIndex.from_product([np.arange(1000),
np.arange(1000)],
names=['one', 'two'])
self.obj_index = np.array([(0, 10), (0, 11), (0, 12),
(0, 13), (0, 14), (0, 15),
(0, 16), (0, 17), (0, 18),
(0, 19)], dtype=object)
def time_get_indexer(self):
self.mi_int.get_indexer(self.obj_index)
def time_is_monotonic(self):
self.mi_int.is_monotonic
class Duplicated(object):
goal_time = 0.2
def setup(self):
n, k = 200, 5000
levels = [np.arange(n),
tm.makeStringIndex(n).values,
1000 + np.arange(n)]
labels = [np.random.choice(n, (k * n)) for lev in levels]
self.mi = MultiIndex(levels=levels, labels=labels)
def time_duplicated(self):
self.mi.duplicated()
class Sortlevel(object):
goal_time = 0.2
def setup(self):
n = 1182720
low, high = -4096, 4096
arrs = [np.repeat(np.random.randint(low, high, (n // k)), k)
for k in [11, 7, 5, 3, 1]]
self.mi_int = MultiIndex.from_arrays(arrs)[np.random.permutation(n)]
a = np.repeat(np.arange(100), 1000)
b = np.tile(np.arange(1000), 100)
self.mi = MultiIndex.from_arrays([a, b])
self.mi = self.mi.take(np.random.permutation(np.arange(100000)))
def time_sortlevel_int64(self):
self.mi_int.sortlevel()
def time_sortlevel_zero(self):
self.mi.sortlevel(0)
def time_sortlevel_one(self):
self.mi.sortlevel(1)
class Values(object):
goal_time = 0.2
def setup_cache(self):
level1 = range(1000)
level2 = date_range(start='1/1/2012', periods=100)
mi = MultiIndex.from_product([level1, level2])
return mi
def time_datetime_level_values_copy(self, mi):
mi.copy().values
def time_datetime_level_values_sliced(self, mi):
mi[:10].values
| bsd-3-clause |
Karel-van-de-Plassche/bokeh | bokeh/sampledata/tests/test_airports.py | 2 | 1951 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import pandas as pd
# Bokeh imports
from bokeh.util.testing import verify_all
# Module under test
#import bokeh.sampledata.airports as bsa
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.airports", ALL))
@pytest.mark.sampledata
def test_data():
import bokeh.sampledata.airports as bsa
assert isinstance(bsa.data, pd.DataFrame)
# don't check detail for external data
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| bsd-3-clause |
oduwa/Wheat-Count | PicNumero/template_matching.py | 2 | 4973 | from collections import namedtuple
import numpy as np
from scipy import misc
from skimage.color import rgb2gray
from skimage import data
from skimage.feature import match_template
# Way to import from matplotlib without warning according to
# https://github.com/matplotlib/matplotlib/issues/5836#issuecomment-223997114
import warnings;
with warnings.catch_warnings():
warnings.simplefilter("ignore");
import matplotlib.pyplot as plt
def get_n_max_indices(X, n):
    '''Return a 1d numpy array containing the indices of the n largest values in X.
    Note: each located maximum is overwritten with -1, so X is modified in place.'''
indices = np.zeros(n);
for i in range(0,n):
indices[i] = np.argmax(X);
X.flat[int(indices[i])] = -1;
return indices
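# Worked example (illustrative): get_n_max_indices(np.array([3., 1., 4., 1., 5.]), 2)
# returns array([4., 2.]) -- the positions of 5 and 4. Remember that the input
# array is modified in place, as noted in the docstring above.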
def match_templates_1(search_image, template_image, n=0):
'''
Calculates the n closest matches of some template image in another image and
displays a figure illustrating the results.
Args:
search_image: image within which to match template.
template_image: image to be matched.
n: number of matches to be found. ie. closest n matches.
'''
Point = namedtuple('Point', ['x', 'y'])
# Calculate template matches
match_result = match_template(search_image, template_image);
# Get closest n matches
print(match_result.shape)
if(n == 0):
n = int(match_result.shape[1]);
matched_point_list = []
max_indices = get_n_max_indices(match_result, n)
for index in max_indices:
ij = np.unravel_index(int(index), match_result.shape)
x, y = ij[::-1]
point = Point(x,y)
#print(point)
matched_point_list.append(point)
# Display
fig = plt.figure(figsize=(8, 3))
plt.gray()
ax1 = plt.subplot(1, 3, 1)
ax2 = plt.subplot(1, 3, 2, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3, sharex=ax2, sharey=ax2, adjustable='box-forced')
ax1.imshow(template_image)
ax1.set_axis_off()
ax1.set_title('grain template')
# highlight matched regions
ax2.imshow(search_image)
ax2.set_axis_off()
ax2.set_title('image')
himage, wimage = template_image.shape
for point in matched_point_list:
rect = plt.Rectangle((point.x, point.y), wimage, himage, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
# highlight matched regions
ax3.imshow(match_result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
ax3.autoscale(False)
for point in matched_point_list:
ax3.plot(point.x, point.y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
def match_templates_2(search_image, template_image, n=1):
'''
Calculates the n closest matches of some template image in another image and
displays a figure illustrating the results. This is a variation of the
match_templates_1() method which takes a different approach to matching by
iteratively matching, removing found match from search image and then running
match again.
Args:
search_image: image within which to match template.
template_image: image to be matched.
n: number of matches to be found. ie. closest n matches.
'''
Point = namedtuple('Point', ['x', 'y'])
matched_point_list = []
# Calculate template matches
i = 0
while (i < n):
print(n)
match_result = match_template(search_image, template_image);
# Get closest match and store position
ij = np.unravel_index(np.argmax(match_result), match_result.shape)
x, y = ij[::-1]
point = Point(x,y)
matched_point_list.append(point)
hTemplate, wTemplate = template_image.shape
        # Set matched patch to black (use separate loop variables so that the
        # outer while-loop counter `i` is not clobbered)
        for r in range(0, hTemplate):
            for c in range(0, wTemplate):
                search_image[r + point.y][c + point.x] = 0
        i = i + 1
# Display
fig = plt.figure(figsize=(8, 3))
plt.gray()
ax1 = plt.subplot(1, 3, 1)
ax2 = plt.subplot(1, 3, 2, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3, sharex=ax2, sharey=ax2, adjustable='box-forced')
ax1.imshow(template_image)
ax1.set_axis_off()
ax1.set_title('template')
# highlight matched regions
ax2.imshow(search_image)
ax2.set_axis_off()
ax2.set_title('image')
himage, wimage = template_image.shape
for point in matched_point_list:
rect = plt.Rectangle((point.x, point.y), wimage, himage, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
# highlight matched regions
ax3.imshow(match_result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
ax3.autoscale(False)
for point in matched_point_list:
ax3.plot(point.x, point.y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
search_image = rgb2gray(misc.imread("../Assets/bush.png"))
template_image = rgb2gray(misc.imread("../Assets/grain.png"))
match_templates_1(search_image, template_image, 1);
| mit |
teonlamont/mne-python | examples/decoding/plot_decoding_csp_eeg.py | 8 | 5516 | """
===========================================================================
Motor imagery decoding from EEG data using the Common Spatial Pattern (CSP)
===========================================================================
Decoding of motor imagery applied to EEG data decomposed using CSP.
Here the classifier is applied to features extracted on CSP filtered signals.
See http://en.wikipedia.org/wiki/Common_spatial_pattern and [1]_. The EEGBCI
dataset is documented in [2]_. The data set is available at PhysioNet [3]_.
References
----------
.. [1] Zoltan J. Koles. The quantitative extraction and topographic mapping
of the abnormal components in the clinical EEG. Electroencephalography
and Clinical Neurophysiology, 79(6):440--447, December 1991.
.. [2] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface
(BCI) System. IEEE TBME 51(6):1034-1043.
.. [3] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,
PhysioToolkit, and PhysioNet: Components of a New Research Resource for
Complex Physiologic Signals. Circulation 101(23):e215-e220.
"""
# Authors: Martin Billinger <martin.billinger@tugraz.at>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from mne import Epochs, pick_types, find_events
from mne.channels import read_layout
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
print(__doc__)
# #############################################################################
# # Set parameters and read data
# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14] # motor imagery: hands vs feet
raw_fnames = eegbci.load_data(subject, runs)
raw_files = [read_raw_edf(f, preload=True, stim_channel='auto') for f in
raw_fnames]
raw = concatenate_raws(raw_files)
# strip channel names of "." characters
raw.rename_channels(lambda x: x.strip('.'))
# Apply band-pass filter
raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')
events = find_events(raw, shortest_event=0, stim_channel='STI 014')
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2
###############################################################################
# Classification with linear discrimant analysis
# Define a monte-carlo cross-validation generator (reduce variance):
scores = []
epochs_data = epochs.get_data()
epochs_data_train = epochs_train.get_data()
cv = ShuffleSplit(10, test_size=0.2, random_state=42)
cv_split = cv.split(epochs_data_train)
# Assemble a classifier
lda = LinearDiscriminantAnalysis()
csp = CSP(n_components=4, reg=None, log=True, norm_trace=False)
# Use scikit-learn Pipeline with cross_val_score function
clf = Pipeline([('CSP', csp), ('LDA', lda)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
layout = read_layout('EEG1005')
csp.plot_patterns(epochs.info, layout=layout, ch_type='eeg',
units='Patterns (AU)', size=1.5)
###############################################################################
# Look at performance over time
sfreq = raw.info['sfreq']
w_length = int(sfreq * 0.5) # running classifier: window length
w_step = int(sfreq * 0.1) # running classifier: window step size
w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)
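# Illustrative note: the EEGBCI recordings are assumed here to be sampled at
# 160 Hz, in which case w_length = int(160 * 0.5) = 80 samples and
# w_step = int(160 * 0.1) = 16 samples, i.e. half-second windows advanced in
# 100 ms steps.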
scores_windows = []
for train_idx, test_idx in cv_split:
y_train, y_test = labels[train_idx], labels[test_idx]
X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)
X_test = csp.transform(epochs_data_train[test_idx])
# fit classifier
lda.fit(X_train, y_train)
# running classifier: test classifier on sliding window
score_this_window = []
for n in w_start:
X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])
score_this_window.append(lda.score(X_test, y_test))
scores_windows.append(score_this_window)
# Plot scores over time
w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin
plt.figure()
plt.plot(w_times, np.mean(scores_windows, 0), label='Score')
plt.axvline(0, linestyle='--', color='k', label='Onset')
plt.axhline(0.5, linestyle='-', color='k', label='Chance')
plt.xlabel('time (s)')
plt.ylabel('classification accuracy')
plt.title('Classification score over time')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
russel1237/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
shaneknapp/spark | python/pyspark/sql/session.py | 8 | 31156 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from functools import reduce
from threading import RLock
from pyspark import since
from pyspark.rdd import RDD
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.conversion import SparkConversionMixin
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import DataType, StructType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
Parameters
----------
schema : :class:`pyspark.sql.types.DataType`, str or list, optional
a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
sampleRatio : float, optional
the sample ratio of rows used for inferring
Returns
-------
:class:`DataFrame`
Examples
--------
>>> rdd.toDF().collect()
[Row(name='Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(SparkConversionMixin):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
.. autoattribute:: builder
:annotation:
Examples
--------
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
>>> from datetime import datetime
>>> from pyspark.sql import Row
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + 1)=2, (d + 1)=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, 'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
_sc = None
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
.. versionadded:: 2.0.0
Parameters
----------
key : str, optional
a key name string for configuration property
value : str, optional
a value for configuration property
conf : :class:`SparkConf`, optional
an instance of :class:`SparkConf`
Examples
--------
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
.. versionadded:: 2.0.0
Parameters
----------
master : str
a url for spark master
"""
return self.config("spark.master", master)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
.. versionadded:: 2.0.0
Parameters
----------
name : str
an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive SerDes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
def _sparkContext(self, sc):
with self._lock:
self._sc = sc
return self
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
.. versionadded:: 2.0.0
Examples
--------
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
if self._sc is not None:
sc = self._sc
else:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
# This SparkContext may be an existing one.
sc = SparkContext.getOrCreate(sparkConf)
# Do not update `SparkConf` for existing `SparkContext`, as it's shared
# by all sessions.
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances."""
_instantiatedSession = None
_activeSession = None
def __init__(self, sparkContext, jsparkSession=None):
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
if self._jvm.SparkSession.getDefaultSession().isDefined() \
and not self._jvm.SparkSession.getDefaultSession().get() \
.sparkContext().isStopped():
jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
else:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
SparkSession._activeSession = self
self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@classmethod
def getActiveSession(cls):
"""
Returns the active SparkSession for the current thread, returned by the builder
.. versionadded:: 3.0.0
Returns
-------
:class:`SparkSession`
Spark session if an active session exists for the current thread
Examples
--------
>>> s = SparkSession.getActiveSession()
>>> l = [('Alice', 1)]
>>> rdd = s.sparkContext.parallelize(l)
>>> df = s.createDataFrame(rdd, ['name', 'age'])
>>> df.select("age").collect()
[Row(age=1)]
"""
from pyspark import SparkContext
sc = SparkContext._active_spark_context
if sc is None:
return None
else:
if sc._jvm.SparkSession.getActiveSession().isDefined():
SparkSession(sc, sc._jvm.SparkSession.getActiveSession().get())
return SparkSession._activeSession
else:
return None
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
Returns
-------
:class:`pyspark.sql.conf.RuntimeConfig`
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions, etc.
.. versionadded:: 2.0.0
Returns
-------
:class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
.. versionadded:: 2.0.0
Returns
-------
:class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
.. versionadded:: 2.0.0
Parameters
----------
start : int
the start value
end : int, optional
the end value (exclusive)
step : int, optional
the incremental step (default: 1)
numPartitions : int, optional
the number of partitions of the DataFrame
Returns
-------
:class:`DataFrame`
Examples
--------
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row, dict, or tuple.
Parameters
----------
data : iterable
list of Row, dict, or tuple
names : list, optional
list of column names
Returns
-------
:class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row, dict, or tuple.
Parameters
----------
rdd : :class:`RDD`
an RDD of Row, dict, or tuple
samplingRatio : float, optional
sampling ratio, or no sampling (default)
names : list, optional
Returns
-------
:class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@staticmethod
def _create_shell_session():
"""
Initialize a SparkSession for a pyspark shell session. This is called from shell.py
to make error handling simpler without needing to declare local variables in that
script, which would expose those to users.
"""
import py4j
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
try:
# Try to access HiveConf, it will raise exception if Hive is not added
conf = SparkConf()
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
return SparkSession.builder\
.enableHiveSupport()\
.getOrCreate()
else:
return SparkSession.builder.getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
return SparkSession.builder.getOrCreate()
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of either :class:`Row`,
:class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
Each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
.. versionadded:: 2.0.0
.. versionchanged:: 2.1.0
Added verifySchema.
Parameters
----------
data : :class:`RDD` or iterable
an RDD of any kind of SQL data representation (:class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
schema : :class:`pyspark.sql.types.DataType`, str or list, optional
a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
samplingRatio : float, optional
the sample ratio of rows used for inferring
verifySchema : bool, optional
verify data types of every row against schema. Enabled by default.
Returns
-------
:class:`DataFrame`
Notes
-----
Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.
Examples
--------
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1='Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name='Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name='Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1='Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name='Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name='Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name='Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name='Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a='Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
SparkSession._activeSession = self
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, str):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
# Create a DataFrame from pandas DataFrame.
return super(SparkSession, self).createDataFrame(
data, schema, samplingRatio, verifySchema)
return self._create_dataframe(data, schema, samplingRatio, verifySchema)
def _create_dataframe(self, data, schema, samplingRatio, verifySchema):
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
.. versionadded:: 2.0.0
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2='row1'), Row(f1=2, f2='row2'), Row(f1=3, f2='row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
.. versionadded:: 2.0.0
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
.. versionadded:: 2.0.0
Returns
-------
:class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` instances active on `this` context.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
from pyspark.sql.context import SQLContext
self._sc.stop()
# We should clean the default session up. See SPARK-23228.
self._jvm.SparkSession.clearDefaultSession()
self._jvm.SparkSession.clearActiveSession()
SparkSession._instantiatedSession = None
SparkSession._activeSession = None
SQLContext._instantiatedContext = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
jaredthomas68/FEM | assignments/coding1/fem.py | 2 | 8004 | import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import spsolve
import time
import matplotlib.pylab as plt
def define_stiffness_matrix(Nell, he):
k_basis = np.zeros((2,2))
K = np.zeros((Nell, Nell))
LM = get_lm(Nell)
for e in np.arange(0, Nell):
# get base k matrix for element e
for a in np.arange(0, 2):
for b in np.arange(0, 2):
k_basis[a,b] = ((-1)**(a+b))/he[e]
# populate the stifness matrix for entries corresponding to element e
for a in np.arange(0, 2):
if LM[a, e] == 0:
continue
for b in np.arange(0, 2):
if LM[b, e] == 0:
continue
K[int(LM[a,e]-1), int(LM[b,e]-1)] += k_basis[a,b]
return K
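# Worked example (illustrative): for Nell = 2 and he = [0.5, 0.5], each element
# matrix is (1/0.5) * [[1, -1], [-1, 1]] = [[2, -2], [-2, 2]]. Because the
# location matrix zeroes out the Dirichlet node (LM[-1, -1] = 0), assembly gives
#     K = [[ 2., -2.],
#          [-2.,  4.]]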
def define_forcing_vector(Nell, he, ffunc=0):
f_e = np.zeros(2)
F = np.zeros(Nell)
x1 = 0.
for e in np.arange(0, Nell):
x2 = x1 + he[e]
f_e = (he[e]/6.)*np.array([2.*forcing_function(x1, ffunc) + forcing_function(x2, ffunc),
forcing_function(x1, ffunc) + 2.*forcing_function(x2, ffunc)])
if e == 0:
F[e] = f_e[0]
F[e+1] = f_e[1]
elif e < Nell - 1:
F[e] += f_e[0]
F[e+1] = f_e[1]
else:
F[e] += f_e[0]
x1 += he[e]
return F
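# Note: f_e above is the consistent nodal load vector for linear shape functions
# with the forcing interpolated linearly on the element,
#     f_e = (he/6) * [2*f(x1) + f(x2), f(x1) + 2*f(x2)],
# which is exact when f is at most linear on the element and an approximation
# otherwise (e.g. for ffunc=2, f(x) = x**2).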
def forcing_function(x, ffunc=0):
f = 0
if ffunc == 0:
f = 1.
elif ffunc == 1:
f = x
elif ffunc == 2:
f = x**2
else:
ValueError("ffunc must be one of [0, 1, 2]")
return f
def solve_for_d(K, F):
# Kd = F
# sK = sparse.csr_matrix(K)
# sK = sparse.csr_matrix(F)
# d = np.matmul(np.linalg.inv(K),F)
# print d
sK = sparse.csr_matrix(K)
d = spsolve(sK, F)
return d
def solve_for_displacements(d, Nell, he, g=0):
u = np.zeros(Nell+1)
x1 = 0.0
u[0] = (1.-x1)*d[0]
for e in np.arange(1, Nell):
x1 += he[e]
# u[e] = u[e-1] + (1.-x1)*d[e]
# u[e] = (1.-x1)*d[e]
u[e] = d[e]
# u[-1] = u[-2] + g
u[-1] = g
return u
def get_node_locations_x(Nell, he):
x_el = np.zeros(Nell + 1)
for e in np.arange(1, Nell):
x_el[e] = x_el[e-1] + he[e-1]
x_el[Nell] = x_el[Nell-1] + he[Nell-1]
return x_el
def plot_displaccements(u, x, he, Nell, q=1, ffunc=1):
plt.rcParams.update({'font.size': 22})
x_ex = np.linspace(0, 1., 100)
x_el = get_node_locations_x(Nell, he)
u_ex = get_u_of_x_exact(x_ex, q, ffunc)
u_a = get_u_of_x_approx(x, u, he)
plt.figure()
plt.plot(x_ex, u_ex, label="Exact sol.", linewidth=3)
# plt.plot(x_el, u, '-s', label="Approx. sol. (nodes)")
plt.plot(x, u_a, '--r', markerfacecolor='none', label="Approx. sol.", linewidth=3)
plt.xlabel('X Position')
plt.ylabel("Displacement")
functions = ["$f(x)=c$", "$f(x)=x$", "$f(x)=x^2$"]
plt.title(functions[ffunc]+", $n=%i$" %Nell, y=1.02)
plt.legend(loc=3, frameon=False)
plt.tight_layout()
plt.savefig("displacement_func%i_Nell%i.pdf" %(ffunc, Nell))
plt.show()
plt.close()
return
def get_u_of_x_approx(Xp, u, he):
Nell = he.size
Xn = np.zeros(Nell+1)
for e in np.arange(1, Nell+1):
Xn[e] = Xn[e-1] + he[e-1]
u_x = np.zeros_like(Xp)
for x, i in zip(Xp, np.arange(0,Xp.size)):
for e in np.arange(1, Nell+1):
if x < Xn[e]:
u_x[i] = ((u[e] - u[e-1])/(Xn[e] - Xn[e-1]))*(x-Xn[e-1]) + u[e-1]
break
return u_x
def get_u_of_x_exact(x, q, ffunc=1):
u_ex = 0.
if ffunc == 0:
u_ex = q*(1.-x**2)/2.
elif ffunc == 1:
u_ex = q*(1.-x**3)/6.
elif ffunc == 2:
u_ex = q * (1. - x ** 4) / 12.
return u_ex
def quadrature(Xe, he, ue, ffunc):
# print Xe, he, ue, ffunc
# quit()
Nell = he.size
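    # 3-point Gauss-Legendre quadrature nodes and weights on [-1, 1]
    # (exact for polynomials up to degree 5)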
xi = np.array([-np.sqrt(3./5.), 0., np.sqrt(3./5.)])
w = np.array([5./9., 8./9., 5./9.])
error_squared = 0.0
for el in np.arange(0, Nell):
x = x_of_xi(xi, Xe, he, el)
dxdxi = he[el]/2
ux = get_u_of_x_exact(x, q=1, ffunc=ffunc)
ua = get_u_of_x_approx(x, ue, he)
error_squared += np.sum(((ux-ua)**2)*dxdxi*w)
error = np.sqrt(error_squared)
return error
def x_of_xi(xi, Xe, he, el):
# print xi, Xe.size, he, el
# quit()
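    # affine map from the reference coordinate xi in [-1, 1] to the physical
    # coordinate x in [Xe[el], Xe[el+1]]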
x = (he[el]*xi+Xe[el]+Xe[el+1])/2.
return x
def get_lm(Nell):
"""
Populates the location matrix, LM, to track the location of data in the global stiffness matrix, K
:param Nell: Number of elements
:return LM: Location matrix for data in the stiffness matrix, K
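    For example, get_lm(3) returns LM = [[1, 2, 3], [2, 3, 0]]: each column is
    an element, each row one of its two local nodes, and a 0 entry marks the
    node held fixed by the Dirichlet boundary condition (no equation in K)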
"""
LM = np.zeros([2, Nell])
for e in np.arange(0, Nell):
LM[0, e] = e + 1
LM[1, e] = e + 2
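    # the last global node is fixed by the Dirichlet boundary condition, so it has no equation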
LM[-1, -1] = 0
return LM
def plot_error():
n = np.array([10, 100, 1000, 10000])
error = np.zeros([3, n.size])
h = np.ones(n.size)/n
print h, n
for ffunc, i in zip(np.array([0, 1, 2]), np.arange(0, 3)):
for Nell, j in zip(n, np.arange(n.size)):
# print Nell
he = np.ones(Nell) / Nell
Xe = get_node_locations_x(Nell, he)
K = define_stiffness_matrix(Nell, he)
F = define_forcing_vector(Nell, he, ffunc=ffunc)
d = solve_for_d(K, F)
u = solve_for_displacements(d, Nell, he, g=0)
error[i,j] = quadrature(Xe, he, u, ffunc)
print "ffunc: %i, Nell: %i, Error: %f" % (ffunc, Nell, error[i, j])
np.savetxt('error.txt', np.c_[n, h, np.transpose(error)], header="Nell, h, E(f(x)=c), E(f(x)=x), E(f(x)=x^2)")
plt.loglog(h, error[0,:], '-o', label='$f(x)=c$')
plt.loglog(h, error[1,:], '-o', label='$f(x)=x$')
plt.loglog(h, error[2,:], '-o', label='$f(x)=x^2$')
plt.legend(loc=2)
plt.xlabel('$h$')
plt.ylabel('$Error$')
plt.show()
return
def get_slope():
data = np.loadtxt('error.txt')
fx0 = data[:, 2]
fx1 = data[:, 3]
fx2 = data[:, 4]
h = data[:, 1]
print h
print fx0
print fx1
print fx2
print (np.log(fx0[-1])-np.log(fx0[0]))/(np.log(h[-1])-(np.log(h[0])))
print (np.log(fx1[-1])-np.log(fx1[0]))/(np.log(h[-1])-(np.log(h[0])))
print (np.log(fx2[-1])-np.log(fx2[0]))/(np.log(h[-1])-(np.log(h[0])))
# print (fx2[-1]-fx2[0])/(h[-1]-h[0])
return
if __name__ == "__main__":
# get_lm(10)
# get_slope()
# exit()
# plot_error()
# exit()
#
# input variables
Nell = 10
ffunc = 2
# for ffunc in np.array([0, 1, 2]):
# for Nell in np.array([10, 100]):
he = np.ones(Nell)/Nell
x = np.linspace(0, 1, 4*Nell+1)
Xe = get_node_locations_x(Nell, he)
# error
# er_4_el = 0.0051
tic = time.time()
K = define_stiffness_matrix(Nell, he)
toc = time.time()
print he, Nell, K
print "Time to define stiffness matrix: %.3f (s)" % (toc-tic)
tic = time.time()
F = define_forcing_vector(Nell, he, ffunc=ffunc)
toc = time.time()
print F
print np.array([1/96., 1./16., 1/8., 3./16.])
print "Time to define forcing vector: %.3f (s)" % (toc - tic)
tic = time.time()
d = solve_for_d(K, F)
toc = time.time()
print d
print np.array([1./6., 21./128., 7./48., 37./384.])
print "Time to solve for d: %.3f (s)" % (toc - tic)
tic = time.time()
u = solve_for_displacements(d, Nell, he, g=0)
toc = time.time()
print u
print np.array([1./6., 21./128., 7./48., 37./384., 0])
print "Time to solve for u(x): %.3f (s)" % (toc - tic)
print "Finished"
error = quadrature(Xe, he, u, ffunc)
print error
    plot_displacements(u, x, he, Nell, ffunc=ffunc)
# pre compute
# Nnodes = Nell + 1
# x = 2.0
# node = 1
# Nodes = np.array([0, 0.5, 1])
# Lengths = Nodes/Nodes.size
#
# x = Nodes[0]
#
# print x, node, Nodes, Lengths
# print basis_functions(x, node, Nodes, Lengths)
| mit |
dangra/scrapy-sci | wallpaper_demo/wallpaper/classifier_pipelines.py | 2 | 1983 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
from scrapy.contrib.exporter import JsonItemExporter
from scrapy.exceptions import DropItem
from sklearn.linear_model import LogisticRegression
from scrapy_sci.status import Status, Reader
from scrapy_sci.classifier import ClassifierFactory
class ClassifiersPipeline(object):
def __init__(self):
self.status = Status()
self.classifiers = []
self.exporters = {}
for classifier in self.status.classifiers.keys():
CF = ClassifierFactory(self.status.classifiers[classifier])
CF.create_data_set("both")
            lc = CF.create_classifier(LogisticRegression(C=1e5), self.status.classifiers[classifier]['features']())
lc.fit()
self.classifiers.append((classifier, lc))
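        # sort ascending by estimated accuracy so the weakest classifier comes first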
self.classifiers = sorted(self.classifiers, key = lambda a: a[1].estimate_accuracy(5, verbose=True))
print "Classifier {0} needs the most improvement; selected for export".format(self.classifiers[0][0])
for classification in self.status.classifiers[self.classifiers[0][0]]['classifications']:
            f = open("{0}.json".format(classification), "wb")
self.exporters[classification] = JsonItemExporter(f)
def process_item(self, item, spider):
keep = True
for i, (name, classifier) in enumerate(self.classifiers):
item_classification = classifier.classify(item)
if i == 0: export_classification = item_classification
if self.status.classifiers[name]['classifications'][item_classification] == False:
raise DropItem("Item removed by classifier: {0}".format(name))
if keep == True:
self.exporters[export_classification].export_item(item)
| bsd-3-clause |
zycdragonball/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 29 | 5666 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using convolutional networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is loosely an alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_cnn_model(features, labels, mode):
"""Character level convolutional neural network model to predict classes."""
features_onehot = tf.one_hot(features[CHARS_FEATURE], 256)
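  # treat each document as a 2-D "image": [batch, MAX_DOCUMENT_LENGTH, 256, 1]
  # (height = character position, width = one-hot byte value, 1 channel)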
input_layer = tf.reshape(
features_onehot, [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
input_layer,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
# Add a ReLU for non linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
x_train = x_train.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
x_test = x_test.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
# Build model
classifier = tf.estimator.Estimator(model_fn=char_cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
michaellaier/pymor | src/pymordemos/analyze_pickle.py | 1 | 9229 | #!/usr/bin/env python
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
"""Analyze pickled data demo.
Usage:
analyze_pickle.py histogram [--detailed=DETAILED_DATA] [--error-norm=NORM] REDUCED_DATA SAMPLES
analyze_pickle.py convergence [--detailed=DETAILED_DATA] [--error-norm=NORM] [--ndim=NDIM] REDUCED_DATA SAMPLES
analyze_pickle.py (-h | --help)
This demo loads a pickled reduced discretization, solves for random
parameters, estimates the reduction errors and then visualizes these
estimates. If the detailed discretization and the reconstructor are
also provided, the estimated error is visualized in comparison to
the real reduction error.
The needed data files are created by the thermal block demo, by
setting the '--pickle' option.
Arguments:
REDUCED_DATA File containing the pickled reduced discretization.
SAMPLES Number of parameter samples to test with.
Options:
--detailed=DETAILED_DATA File containing the high-dimensional discretization
and the reconstructor.
--error-norm=NORM Name of norm in which to compute the errors.
--ndim=NDIM Number of reduced basis dimensions for which to estimate
the error.
"""
from __future__ import absolute_import, division, print_function
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
from docopt import docopt
from pymor.core.logger import set_log_levels
from pymor.core.pickle import load
from pymor.reductors.basic import reduce_to_subbasis
set_log_levels({'pymor.algorithms': 'INFO',
'pymor.discretizations': 'INFO',
'pymor.la': 'INFO'})
def analyze_pickle_histogram(args):
args['SAMPLES'] = int(args['SAMPLES'])
print('Loading reduced discretization ...')
rb_discretization = load(open(args['REDUCED_DATA']))
mus = list(rb_discretization.parameter_space.sample_randomly(args['SAMPLES']))
us = []
for mu in mus:
print('Solving reduced for {} ... '.format(mu), end='')
sys.stdout.flush()
us.append(rb_discretization.solve(mu))
print('done')
print()
if hasattr(rb_discretization, 'estimate'):
ests = []
for u, mu in zip(us, mus):
print('Estimating error for {} ... '.format(mu), end='')
sys.stdout.flush()
ests.append(rb_discretization.estimate(u, mu=mu))
print('done')
if args['--detailed']:
print('Loading high-dimensional data ...')
discretization, reconstructor = load(open(args['--detailed']))
errs = []
for u, mu in zip(us, mus):
print('Calculating error for {} ... '.format(mu))
sys.stdout.flush()
err = discretization.solve(mu) - reconstructor.reconstruct(u)
if args['--error-norm']:
errs.append(np.max(getattr(discretization, args['--error-norm'] + '_norm')(err)))
else:
errs.append(np.max(err.l2_norm()))
print('done')
print()
try:
plt.style.use('ggplot')
except AttributeError:
pass # plt.style is only available in newer matplotlib versions
if hasattr(rb_discretization, 'estimate') and args['--detailed']:
# setup axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left+width+0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
plt.figure(1, figsize=(8, 8))
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx)
axHisty = plt.axes(rect_histy)
# scatter plot
total_min = min(min(ests), min(errs)) * 0.9
total_max = max(max(ests), max(errs)) * 1.1
axScatter.set_xscale('log')
axScatter.set_yscale('log')
axScatter.set_xlim([total_min, total_max])
axScatter.set_ylim([total_min, total_max])
axScatter.set_xlabel('errors')
axScatter.set_ylabel('estimates')
axScatter.plot([total_min, total_max], [total_min, total_max], 'r')
axScatter.scatter(errs, ests)
# plot histograms
x_hist, x_bin_edges = np.histogram(errs, bins=np.logspace(np.log10(total_min), np.log10(total_max), 100))
axHistx.bar(x_bin_edges[1:], x_hist, width=x_bin_edges[:-1] - x_bin_edges[1:], color='blue')
y_hist, y_bin_edges = np.histogram(ests, bins=np.logspace(np.log10(total_min), np.log10(total_max), 100))
axHisty.barh(y_bin_edges[1:], y_hist, height=y_bin_edges[:-1] - y_bin_edges[1:], color='blue')
axHistx.set_xscale('log')
axHisty.set_yscale('log')
axHistx.set_xticklabels([])
axHisty.set_yticklabels([])
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
axHistx.set_ylim([0, max(max(x_hist), max(y_hist))])
axHisty.set_xlim([0, max(max(x_hist), max(y_hist))])
plt.show()
elif hasattr(rb_discretization, 'estimate'):
total_min = min(ests) * 0.9
total_max = max(ests) * 1.1
hist, bin_edges = np.histogram(ests, bins=np.logspace(np.log10(total_min), np.log10(total_max), 100))
plt.bar(bin_edges[1:], hist, width=bin_edges[:-1] - bin_edges[1:], color='blue')
plt.xlim([total_min, total_max])
plt.xscale('log')
plt.xlabel('estimated error')
plt.show()
elif args['--detailed']:
        total_min = min(errs) * 0.9
        total_max = max(errs) * 1.1
hist, bin_edges = np.histogram(errs, bins=np.logspace(np.log10(total_min), np.log10(total_max), 100))
plt.bar(bin_edges[1:], hist, width=bin_edges[:-1] - bin_edges[1:], color='blue')
plt.xlim([total_min, total_max])
plt.xscale('log')
plt.xlabel('error')
plt.show()
else:
raise ValueError('Nothing to plot!')
def analyze_pickle_convergence(args):
args['SAMPLES'] = int(args['SAMPLES'])
print('Loading reduced discretization ...')
rb_discretization = load(open(args['REDUCED_DATA']))
if args['--detailed']:
print('Loading high-dimensional data ...')
discretization, reconstructor = load(open(args['--detailed']))
if not hasattr(rb_discretization, 'estimate') and not args['--detailed']:
raise ValueError('Nothing to do! (Neither estimates nor true error can be computed.)')
dim = rb_discretization.solution_space.dim
if args['--ndim']:
        dims = np.linspace(0, dim, int(args['--ndim']), dtype=np.int)
else:
dims = np.arange(dim + 1)
mus = list(rb_discretization.parameter_space.sample_randomly(args['SAMPLES']))
ESTS = []
ERRS = []
T_SOLVES = []
T_ESTS = []
for N in dims:
rd, rc, _ = reduce_to_subbasis(rb_discretization, N)
print('N = {:3} '.format(N), end='')
us = []
print('solve ', end='')
sys.stdout.flush()
start = time.time()
for mu in mus:
us.append(rd.solve(mu))
T_SOLVES.append((time.time() - start) * 1000. / len(mus))
print('estimate ', end='')
sys.stdout.flush()
if hasattr(rb_discretization, 'estimate'):
ests = []
start = time.time()
for u, mu in zip(us, mus):
# print('e', end='')
# sys.stdout.flush()
ests.append(rd.estimate(u, mu=mu))
ESTS.append(max(ests))
T_ESTS.append((time.time() - start) * 1000. / len(mus))
if args['--detailed']:
print('errors', end='')
sys.stdout.flush()
errs = []
for u, mu in zip(us, mus):
err = discretization.solve(mu) - reconstructor.reconstruct(rc.reconstruct(u))
if args['--error-norm']:
errs.append(np.max(getattr(discretization, args['--error-norm'] + '_norm')(err)))
else:
errs.append(np.max(err.l2_norm()))
ERRS.append(max(errs))
print()
print()
try:
plt.style.use('ggplot')
except AttributeError:
pass # plt.style is only available in newer matplotlib versions
plt.subplot(1, 2, 1)
if hasattr(rb_discretization, 'estimate'):
plt.semilogy(dims, ESTS, label='max. estimate')
if args['--detailed']:
plt.semilogy(dims, ERRS, label='max. error')
plt.xlabel('dimension')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(dims, T_SOLVES, label='avg. solve time')
if hasattr(rb_discretization, 'estimate'):
plt.plot(dims, T_ESTS, label='avg. estimate time')
plt.xlabel('dimension')
plt.ylabel('milliseconds')
plt.legend()
plt.show()
def analyze_pickle_demo(args):
if args['histogram']:
analyze_pickle_histogram(args)
else:
analyze_pickle_convergence(args)
if __name__ == '__main__':
# parse arguments
args = docopt(__doc__)
# run demo
analyze_pickle_demo(args)
| bsd-2-clause |
wanggang3333/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
rmccoy7541/egillettii-rnaseq | scripts/sample_performance_analysis.py | 1 | 3763 | #! /bin/env python
import sys
from optparse import OptionParser
import copy
import matplotlib
matplotlib.use('Agg')
import pylab
import scipy.optimize
import numpy
from numpy import array
import dadi
import os
#call ms program from within dadi, using optimized parameters (converted to ms units)
core = "-n 1 0.922 -n 2 0.104 -ej 0.0330 2 1 -en 0.0330 1 1"
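# roughly: -n i x sets the present size of population i to x*Nref, -ej t 2 1 joins
# population 2 into population 1 at time t looking backwards (i.e. the split, forward in time),
# and -en t 1 x sets the ancestral size of population 1 from that time onwards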
#modify the (20,20) portion to change the number of sampled chromosomes in each population
command = dadi.Misc.ms_command(100000, (20,20), core, 1, 2000)
ms_fs = dadi.Spectrum.from_ms_file(os.popen(command))
#scale the frequency spectrum to the same number of segregating sites
scaled_ms_fs = ms_fs.fixed_size_sample(1881)
scaled_ms_fs = scaled_ms_fs.fold()
#import demographic models
import gillettii_models
def runModel(outFile, nuW_start, nuC_start, T_start):
# Extract the spectrum from ms output
fs = scaled_ms_fs
ns = fs.sample_sizes
print 'sample sizes:', ns
# These are the grid point settings will use for extrapolation.
pts_l = [20,30,40]
    # It is suggested that the smallest grid be slightly larger than the largest sample size, but this may take a long time.
# bottleneck_split model
func = gillettii_models.bottleneck_split
params = array([nuW_start, nuC_start, T_start])
upper_bound = [30, 10, 10]
lower_bound = [1e-5, 1e-10, 0]
# Make the extrapolating version of the demographic model function.
func_ex = dadi.Numerics.make_extrap_func(func)
# Calculate the model AFS
model = func_ex(params, ns, pts_l)
# Calculate likelihood of the data given the model AFS
# Likelihood of the data given the model AFS.
ll_model = dadi.Inference.ll_multinom(model, fs)
print 'Model log-likelihood:', ll_model, "\n"
# The optimal value of theta given the model.
theta = dadi.Inference.optimal_sfs_scaling(model, fs)
p0 = dadi.Misc.perturb_params(params, fold=1, lower_bound=lower_bound, upper_bound=upper_bound)
print 'perturbed parameters: ', p0, "\n"
popt = dadi.Inference.optimize_log_fmin(p0, fs, func_ex, pts_l, upper_bound=upper_bound, lower_bound=lower_bound, maxiter=None, verbose=len(params))
print 'Optimized parameters:', repr(popt), "\n"
#use the optimized parameters in a new model to try to get the parameters to converge
new_model = func_ex(popt, ns, pts_l)
ll_opt = dadi.Inference.ll_multinom(new_model, fs)
print 'Optimized log-likelihood:', ll_opt, "\n"
# Write the parameters and log-likelihood to the outFile
s = str(nuW_start) + '\t' + str(nuC_start) + '\t' + str(T_start) + '\t'
for i in range(0, len(popt)):
s += str(popt[i]) + '\t'
s += str(ll_opt) + '\n'
outFile.write(s)
#################
def mkOptionParser():
""" Defines options and returns parser """
usage = """%prog <outFN> <nuW_start> <nuC_start> <T_start>
%prog performs demographic inference on gillettii RNA-seq data. """
parser = OptionParser(usage)
return parser
def main():
""" see usage in mkOptionParser. """
parser = mkOptionParser()
options, args= parser.parse_args()
if len(args) != 4:
parser.error("Incorrect number of arguments")
outFN = args[0]
nuW_start = float(args[1])
nuC_start = float(args[2])
T_start = float(args[3])
if outFN == '-':
outFile = sys.stdout
else:
outFile = open(outFN, 'a')
runModel(outFile, nuW_start, nuC_start, T_start)
#run main
if __name__ == '__main__':
main()
| mit |
abalkin/numpy | tools/refguide_check.py | 1 | 37850 | #!/usr/bin/env python3
"""
refguide_check.py [OPTIONS] [-- ARGS]
- Check for a NumPy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
- Check docstring examples
- Check example blocks in RST files
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings::
$ python refguide_check.py --doctests ma
or in RST-based documentations::
$ python refguide_check.py --rst docs
"""
import copy
import doctest
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
import docutils.core
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from docutils.parsers.rst import directives
from pkg_resources import parse_version
import sphinx
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
SKIPBLOCK = doctest.register_optionflag('SKIPBLOCK')
if parse_version(sphinx.__version__) >= parse_version('1.5'):
# Enable specific Sphinx directives
from sphinx.directives.other import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
else:
# Remove sphinx directives that don't run without Sphinx environment.
# Sphinx < 1.5 installs all directives on import...
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "numpy"
PUBLIC_SUBMODULES = [
'core',
'doc.structured_arrays',
'f2py',
'linalg',
'lib',
'lib.recfunctions',
'fft',
'ma',
'polynomial',
'matrixlib',
'random',
'testing',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
# cases where NumPy docstrings import things from SciPy:
'numpy.lib.vectorize',
'numpy.random.standard_gamma',
'numpy.random.gamma',
'numpy.random.vonmises',
'numpy.random.power',
'numpy.random.zipf',
# remote / local file IO with DataSource is problematic in doctest:
'numpy.lib.DataSource',
'numpy.lib.Repository',
])
# Skip non-numpy RST files, historical release notes
# Any single-directory exact match will skip the directory and all subdirs.
# Any exact match (like 'doc/release') will scan subdirs but skip files in
# the matched directory.
# Any filename will skip that file
RST_SKIPLIST = [
'scipy-sphinx-theme',
'sphinxext',
'neps',
'changelog',
'doc/release',
'doc/source/release',
'c-info.ufunc-tutorial.rst',
'c-info.python-as-glue.rst',
'f2py.getting-started.rst',
'arrays.nditer.cython.rst',
]
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
# NOTE: should NumPy have a better match between autosummary
# listings and __all__? For now, TR isn't convinced this is a
# priority -- focus on just getting docstrings executed / correct
r'numpy\.*',
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):
REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
Parameters
----------
path: str or None
cwd: str or None
Returns
-------
str
Relative path or absolute path based on current working directory
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
"""
Finds the occurrences of function names, special directives like data
and functions and scipy constants in the docstrings of `module`. The
following patterns are searched for:
* 3 spaces followed by function name, and maybe some spaces, some
dashes, and an explanation; only function names listed in
refguide are formatted like this (mostly, there may be some false
      positives)
* special directives, such as data and function
* (scipy.constants only): quoted list
The `names_dict` is updated by reference and accessible in calling method
Parameters
----------
module : ModuleType
The module, whose docstrings is to be searched
names_dict : dict
Dictionary which contains module name as key and a set of found
function names and directives as value
Returns
-------
None
"""
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""
Return a copy of the __all__ dict with irrelevant items removed.
Parameters
----------
module : ModuleType
The module whose __all__ dict has to be processed
Returns
-------
deprecated : list
List of callable and deprecated sub modules
not_deprecated : list
List of non callable or non deprecated sub modules
others : list
List of remaining types of sub modules
"""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
if not all_dict:
# Must be a pure documentation module like doc.structured_arrays
all_dict.append('__doc__')
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""
Return sets of objects from all_dict.
Will return three sets:
{in module_name.__all__},
{in REFGUIDE*},
and {missing from others}
Parameters
----------
all_dict : list
List of non deprecated sub modules for module_name
others : list
List of sub modules for module_name
names : set
Set of function names or special directives present in
docstring of module_name
module_name : ModuleType
Returns
-------
only_all : set
only_ref : set
missing : set
"""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
"""
    Check whether the callable `f` is deprecated
    Parameters
    ----------
    f : callable
Returns
-------
bool
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except Exception:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
"""
Check that `all_dict` is consistent with the `names` in `module_name`
For instance, that there are no deprecated or extra objects.
Parameters
----------
all_dict : list
names : set
deprecated : list
others : list
module_name : ModuleType
dots : bool
Whether to print a dot for each check
Returns
-------
list
List of [(name, success_flag, output)...]
"""
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
output += "\nThis issue can be fixed by adding these objects to\n"
output += "the function listing in __init__.py for this module\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
output += "\nThis issue should likely be fixed by removing these objects\n"
output += "from the function listing in __init__.py for this module\n"
output += "or adding them to __all__.\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
"""
Validates the doc string in a snippet of documentation
`text` from file `name`
Parameters
----------
text : str
Docstring text
name : str
File name for which the doc string is to be validated
dots : bool
Whether to print a dot symbol for each check
Returns
-------
(bool, str)
"""
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'doc', 'currentmodule', 'autosummary', 'data', 'attr',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor', 'term', 'c:member',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Parameters
----------
module : ModuleType
names : set
Returns
-------
result : list
List of [(module_name, success_flag, output),...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except Exception:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'numpy': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float32': np.float32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,
'StringIO': io.StringIO,
}
class DTRunner(doctest.DocTestRunner):
"""
The doctest runner
"""
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
"""
Check the docstrings
"""
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary",
"# uninitialized", "#uninitialized"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
'.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
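    # doctest examples whose source contains any of these stopwords (mostly
    # matplotlib plotting calls) are not compared against the expected output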
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = CHECK_NAMESPACE
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except Exception:
# Maybe we're printing a numpy array? This produces invalid python
# code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
# values. So, reinsert commas and retry.
            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
# (2) n-dim arrays with n > 1
s_want = want.strip()
s_got = got.strip()
cond = (s_want.startswith("[") and s_want.endswith("]") and
s_got.startswith("[") and s_got.endswith("]"))
if cond:
s_want = ", ".join(s_want[1:-1].split())
s_got = ", ".join(s_got[1:-1].split())
return self.check_output(s_want, s_got, optionflags)
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = (r'[\w\d_]+\(' +
', '.join([r'[\w\d_]+=(.+)']*num) +
r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""
Run modified doctests for the set of `tests`.
Parameters
----------
tests: list
full_name : str
verbose : bool
doctest_warning : bool
Returns
-------
tuple(bool, list)
Tuple of (success, output)
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = io.StringIO(newline='')
success = True
# Redirect stderr to the stdout or output
tmp_stderr = sys.stdout if doctest_warnings else output
@contextmanager
def temp_cwd():
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
yield tmpdir
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
# Run tests, trying to restore global state afterward
cwd = os.getcwd()
with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \
redirect_stderr(tmp_stderr):
# try to ensure random seed is NOT reproducible
np.random.seed(None)
ns = {}
for t in tests:
# We broke the tests up into chunks to try to avoid PSEUDOCODE
# This has the unfortunate side effect of restarting the global
# namespace for each test chunk, so variables will be "lost" after
# a chunk. Chain the globals to avoid this
t.globs.update(ns)
t.filename = short_path(t.filename, cwd)
# Process our options
if any([SKIPBLOCK in ex.options for ex in t.examples]):
continue
fails, successes = runner.run(t, out=output.write, clear_globs=False)
if fails > 0:
success = False
ns = t.globs
output.seek(0)
return success, output.read()
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""
Check code in docstrings of the module's public symbols.
Parameters
----------
module : ModuleType
Name of module
verbose : bool
Should the result be verbose
ns : dict
Name space of module
dots : bool
doctest_warnings : bool
Returns
-------
results : list
List of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except Exception:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""
Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Parameters
----------
fname : str
File name
verbose : bool
ns : dict
Name space
dots : bool
doctest_warnings : bool
Returns
-------
list
List of [(item_name, success_flag, output), ...]
Notes
-----
refguide can be signalled to skip testing code by adding
``#doctest: +SKIP`` to the end of the line. If the output varies or is
random, add ``# may vary`` or ``# random`` to the comment. for example
>>> plt.plot(...) # doctest: +SKIP
>>> random.randint(0,10)
5 # random
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
if ns is None:
ns = CHECK_NAMESPACE
results = []
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
with open(fname, encoding='utf-8') as f:
text = f.read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
base_line_no = 0
for part in text.split('\n\n'):
try:
tests = parser.get_doctest(part, ns, fname, fname, base_line_no)
except ValueError as e:
if e.args[0].startswith('line '):
# fix line number since `parser.get_doctest` does not increment
# the reported line number by base_line_no in the error message
parts = e.args[0].split()
parts[1] = str(int(parts[1]) + base_line_no)
e.args = (' '.join(parts),) + e.args[1:]
raise
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts.append((part, base_line_no))
base_line_no += part.count('\n') + 2
# Reassemble the good bits and doctest them:
tests = []
for good_text, line_no in good_parts:
tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no))
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def iter_included_files(base_path, verbose=0, suffixes=('.rst',)):
"""
Generator function to walk `base_path` and its subdirectories, skipping
files or directories in RST_SKIPLIST, and yield each file with a suffix in
`suffixes`
Parameters
----------
base_path : str
Base path of the directory to be processed
verbose : int
suffixes : tuple
Yields
------
path
Path of the directory and its sub directories
"""
if os.path.exists(base_path) and os.path.isfile(base_path):
yield base_path
for dir_name, subdirs, files in os.walk(base_path, topdown=True):
if dir_name in RST_SKIPLIST:
if verbose > 0:
sys.stderr.write('skipping files in %s' % dir_name)
files = []
for p in RST_SKIPLIST:
if p in subdirs:
if verbose > 0:
sys.stderr.write('skipping %s and subdirs' % p)
subdirs.remove(p)
for f in files:
if (os.path.splitext(f)[1] in suffixes and
f not in RST_SKIPLIST):
yield os.path.join(dir_name, f)
def check_documentation(base_path, results, args, dots):
"""
Check examples in any *.rst located inside `base_path`.
Add the output to `results`.
See Also
--------
check_doctests_testfile
"""
for filename in iter_included_files(base_path, args.verbose):
if dots:
sys.stderr.write(filename + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(
filename,
(args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
# stub out a "module" which is needed when reporting the result
def scratch():
pass
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write('\n')
sys.stderr.flush()
def init_matplotlib():
"""
Check feasibility of matplotlib initialization.
"""
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
"""
Validates the docstrings of all the pre decided set of
modules for errors and docstring standards.
"""
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true",
                        help="Run also doctests on the checked modules")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--rst", nargs='?', const='doc', default=None,
help=("Run also examples from *rst files "
"discovered walking the directory(s) specified, "
"defaults to 'doc'"))
args = parser.parse_args(argv)
modules = []
names_dict = {}
if not args.module_names:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in module_names:
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
dots = True
success = True
results = []
errormsgs = []
if args.doctests or args.rst:
init_matplotlib()
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
if args.doctests or not args.rst:
print("Running checks for %d modules:" % (len(modules),))
for module in modules:
if dots:
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others,
module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write('\n')
sys.stderr.flush()
if args.rst:
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
rst_path = os.path.relpath(os.path.join(base_dir, args.rst))
if os.path.exists(rst_path):
print('\nChecking files in %s:' % rst_path)
check_documentation(rst_path, results, args, dots)
else:
sys.stderr.write(f'\ninvalid --rst argument "{args.rst}"')
errormsgs.append('invalid directory argument to --rst')
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
for module, mod_results in results:
success = all(x[1] for x in mod_results)
if not success:
errormsgs.append(f'failed checking {module.__name__}')
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if len(errormsgs) == 0:
print("\nOK: all checks passed!")
sys.exit(0)
else:
print('\nERROR: ', '\n '.join(errormsgs))
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
mirestrepo/voxels-at-lems | bvpl/bvpl_octree/pca_test_vs_train.py | 1 | 1887 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 7 14:38:46 2011
Plot pca test error vs train error
@author: -
"""
# Computes the gaussian gradients on a boxm_alpha_scene
import os;
import optparse;
import time;
import sys;
import plot_pca_functions;
import numpy as np
import matplotlib.pyplot as plt
import math
if __name__=="__main__":
#Parse inputs
# parser = optparse.OptionParser(description='Compute PCA basis');
#
# parser.add_option('--pca_dir', action="store", dest="pca_dir");
# options, args = parser.parse_args();
#
# pca_dir = options.pca_dir;
dirs=[];
dirs.append('/Users/isa/Experiments/PCA/site22/10');
#dirs.append('/Users/isa/Experiments/PCA/CapitolBOXM_6_4_4/10');
#dirs.append('/Users/isa/Experiments/PCA/DowntownBOXM_3_3_1/10');
labels=[];
#labels.append('Capitol Train');
#labels.append('Capitol Overall');
labels.append('Downtown Training Error');
labels.append('Downtown Overall Error');
dim=125;
i= 0;
fig = plt.figure(1);
for pca_dir in dirs:
print (pca_dir)
if not os.path.isdir( pca_dir + '/'):
sys.exit(-1);
train_error_file = pca_dir + "/normalized_training_error.txt";
overall_error = plot_pca_functions.read_test_error(pca_dir, dim);
train_error = plot_pca_functions.read_vector(train_error_file);
print(train_error);
print(overall_error);
x = np.arange(0, len(train_error), 1);
plt.plot(x, train_error, label=labels[i]);
plt.hold(True);
x = np.arange(0, len(train_error)+1, 5);
plt.plot(x, overall_error, label=labels[i+1]);
i=i+2;
plt.title('Overall error vs training error ',fontsize= 14);
plt.xlabel('Number of components used for reconstruction', fontsize= 14);
a = plt.gca()
a.set_xlim([0,125])
plt.ylabel('Average error per feature vector',fontsize= 14);
plt.legend();
plt.show();
| bsd-2-clause |
tornadomeet/mxnet | python/mxnet/model.py | 13 | 41314 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import os
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
    This function selects and creates a proper kvstore when given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
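# Illustrative sketch (hedged, not part of the original module): how training code
# typically calls the helper above. `ctx` and `arg_params` are hypothetical placeholders.
#
#   kv, update_on_kvstore = _create_kvstore('local', len(ctx), arg_params)
#   # kv is None for a single local device, otherwise a kvstore.KVStore instance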
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on NCCL kvstore."""
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
# Use aggregation by default only with NCCL
default_batch = 16
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
# push gradient, priority is negative index
kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)
# pull back the weights
kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)
start = end
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution later
w, g = p
updater(index*num_device+k, g, w)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
    This function also works for a single device.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
        This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
    - This function will update the NDArrays in `arg_params` and `aux_states` in place.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(optimizer)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
if 'nccl' in kvstore.type:
_update_params_on_kvstore_nccl(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
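# Illustrative sketch (hedged, not part of the original module): a typical checkpoint
# round trip with the two helpers above. `net`, the prefix and the epoch number are
# hypothetical placeholders.
#
#   save_checkpoint('mymodel', 10, net, arg_params, aux_params)
#   net, arg_params, aux_params = load_checkpoint('mymodel', 10)
#   # files written/read: mymodel-symbol.json and mymodel-0010.params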
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
        Training parameter, number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol
        to be passed in ``aux_params`` and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
        eval_metric : metric.EvalMetric or str
The metric for calculating score.
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            Defaults to 'local'; often there is no need to change it for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
        # initialize the optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
            if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
optimizer = self.optimizer
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
        One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
        - Symbol will be loaded from ``prefix-symbol.json``.
        - Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
            If `eval_data` is a ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
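# Illustrative sketch (hedged; FeedForward is deprecated in favour of mxnet.mod.Module,
# and `softmax`, `train_iter`, `val_iter` below are hypothetical placeholders for a
# network symbol and two DataIter objects):
#
#   model = FeedForward.create(softmax, X=train_iter, eval_data=val_iter,
#                              ctx=cpu(), num_epoch=10, optimizer='sgd',
#                              learning_rate=0.1)
#   y_pred = model.predict(val_iter)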
| apache-2.0 |
michaelpacer/pyhawkes | experiments/discrete_continuous_comparison.py | 2 | 5308 | import time
import numpy as np
np.random.seed(1111)
np.seterr(over="raise")
import cPickle, os
from hips.plotting.layout import create_figure
import matplotlib.pyplot as plt
import brewer2mpl
colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
# goodcolors = np.array([0,1,2,4,6,7,8])
# colors = np.array(colors)[goodcolors]
from pybasicbayes.util.general import ibincount
from pybasicbayes.util.text import progprint_xrange
import pyhawkes.models
reload(pyhawkes.models)
# Set globals
K = 10
B = 3
dt = 1
dt_max = 10.
T = 100.
network_hypers = {'C': 1, 'kappa': 1., 'c': np.zeros(K, dtype=np.int), 'p': 1*np.ones((1,1)), 'v': 10.}
def generate_dataset(bias=1.):
# Create the model with these parameters
network_hypers = {'C': 1, 'kappa': 1., 'c': np.zeros(K, dtype=np.int), 'p': 1*np.ones((1,1)), 'v': 100.}
bkgd_hypers = {"alpha": 3., "beta": 3./bias}
dt_model = pyhawkes.models.\
DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, B=B,
bkgd_hypers=bkgd_hypers,
network_hypers=network_hypers)
# dt_model.bias_model.lambda0 = bias * np.ones(K)
assert dt_model.check_stability()
S_dt,_ = dt_model.generate(T=int(np.ceil(T/dt)), keep=False)
print "sampled dataset with ", S_dt.sum(), "events"
# Convert S_dt to continuous time
S_ct = dt * np.concatenate([ibincount(S) for S in S_dt.T]).astype(float)
S_ct += dt * np.random.rand(*S_ct.shape)
assert np.all(S_ct < T)
C_ct = np.concatenate([k*np.ones(S.sum()) for k,S in enumerate(S_dt.T)]).astype(int)
# Sort the data
perm = np.argsort(S_ct)
S_ct = S_ct[perm]
C_ct = C_ct[perm]
return S_dt, S_ct, C_ct
def fit_discrete_time_model_gibbs(S_dt, N_samples=100):
# Now fit a DT model
dt_model_test = pyhawkes.models.\
DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, B=B,
network_hypers=network_hypers)
dt_model_test.add_data(S_dt)
tic = time.time()
for iter in progprint_xrange(N_samples, perline=25):
dt_model_test.resample_model()
toc = time.time()
return (toc-tic) / N_samples
def fit_continuous_time_model_gibbs(S_ct, C_ct, N_samples=100):
# Now fit a DT model
ct_model = pyhawkes.models.\
ContinuousTimeNetworkHawkesModel(K, dt_max=dt_max,
network_hypers=network_hypers)
ct_model.add_data(S_ct, C_ct, T)
tic = time.time()
for iter in progprint_xrange(N_samples, perline=25):
ct_model.resample_model()
toc = time.time()
return (toc-tic) / N_samples
# def run_time_vs_bias():
if __name__ == "__main__":
# run_time_vs_bias()
# biases = np.logspace(-1,1, num=10)
res_file = os.path.join("results", "run_time_vs_rate_2.pkl")
if os.path.exists(res_file):
print "Loading results from ", res_file
with open(res_file, "r") as f:
events_per_bin, dt_times, ct_times = cPickle.load(f)
else:
biases = np.linspace(10**-1,3**1, num=5)
N_runs_per_bias = 5
N_samples = 100
events_per_bin = []
dt_times = []
ct_times = []
for bias in biases:
for iter in xrange(N_runs_per_bias):
print "Bias ", bias, " Run (%d/%d)" % (iter, N_runs_per_bias)
S_dt, S_ct, C_ct = generate_dataset(bias)
events_per_bin.append(S_dt.sum() / float(S_dt.size))
dt_times.append(fit_discrete_time_model_gibbs(S_dt, N_samples))
ct_times.append(fit_continuous_time_model_gibbs(S_ct, C_ct, N_samples))
with open(res_file, "w") as f:
cPickle.dump((events_per_bin, dt_times, ct_times), f, protocol=-1)
events_per_bin = np.array(events_per_bin)
dt_times = np.array(dt_times)
ct_times = np.array(ct_times)
perm = np.argsort(events_per_bin)
events_per_bin = events_per_bin[perm]
dt_times = dt_times[perm]
ct_times = ct_times[perm]
# Plot the results
fig = create_figure(figsize=(2.5,2.5))
fig.set_tight_layout(True)
ax = fig.add_subplot(111)
# Plot DT data
ax.plot(events_per_bin, dt_times, 'o', linestyle="none",
markerfacecolor=colors[2], markeredgecolor=colors[2], markersize=4,
label="Discrete")
# Plot linear fit
p_dt = np.poly1d(np.polyfit(events_per_bin, dt_times, deg=1))
dt_pred = p_dt(events_per_bin)
ax.plot(events_per_bin, dt_pred, ':', lw=2, color=colors[2])
# Plot CT data
ax.plot(events_per_bin, ct_times, 's', linestyle="none",
markerfacecolor=colors[7], markeredgecolor=colors[7], markersize=4,
label="Continuous")
# Plot quadratic fit
p_ct = np.poly1d(np.polyfit(events_per_bin, ct_times, deg=2))
ct_pred = p_ct(sorted(events_per_bin))
ax.plot(events_per_bin, ct_pred, ':', lw=2, color=colors[7])
plt.xlabel("Events per bin")
# plt.xlim(0, events_per_bin.max())
plt.xlim(0, 6)
plt.ylabel("time per iter [sec]")
plt.ylim(0, 0.15)
plt.legend(loc="upper left", prop={"size": 8})
fig.savefig(os.path.join("results", "discrete_cont_comparison.pdf"))
plt.show()
| mit |
vlarson/class-scripts | arm_analysis/arm_utilities.py | 1 | 6034 | # -*- coding: utf-8 -*-
"""
Analyze ARM observations.
"""
def plotSfcRad(beflux_file, min_sdn):
"""Plot surface radiative fields."""
# Point to directory containing ARM observations
#data_dir = '/home/studi/Larson/arm_data_files/'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131204.000000.custom.cdf'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131205.000000.custom.cdf'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131206.000000.custom.cdf'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131207.000000.custom.cdf'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131208.000000.custom.cdf'
# SDN showed a few clouds on 20131215:
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131215.000000.custom.cdf'
# 20131217 had essentially clear skies
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131217.000000.custom.cdf'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131218.000000.custom.cdf'
# Import libraries
from numpy import fmax
import matplotlib.pyplot as plt
from scipy.io import netcdf
import pdb
beflux_nc = netcdf.netcdf_file(beflux_file, 'r')
time_offset_beflux = beflux_nc.variables['time_offset']
short_direct_normal = beflux_nc.variables['short_direct_normal']
# Replace small values with threshold, for plotting time series
sdn_floored = fmax(min_sdn,short_direct_normal[:])
# Remove small values from time series, thereby shortening vector length
sdn_truncated = [x for x in short_direct_normal if x > min_sdn]
#pdb.set_trace()
# Plot time series of shortwave direct normal flux
plt.clf()
plt.subplot(211)
plt.plot(time_offset_beflux[:],sdn_floored[:])
plt.xlabel('Time')
plt.ylabel('Shortwave direct normal flux')
#pdb.set_trace()
# Plot histogram of shortwave direct normal flux
plt.subplot(212)
n, bins, patches = plt.hist(sdn_truncated[:], 50, normed=1, histtype='stepfilled')
plt.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
plt.xlabel('Shortwave direct normal flux')
plt.ylabel('Probability')
plt.figure()
plt.show()
return
def findTruncNormalRoots(truncMean,truncVarnce,muInit,sigmaInit,min_refl):
"""Returns parameters mu and sigma of a truncated normal distribution.
Inputs:
truncMean = mean of truncated part of full (untruncated) normal
truncVarnce = variance of truncated part of full normal
muInit = first guess value of mu, the mean of the full normal
sigmaInit = first guess value of sigma, the standard deviation of the full normal
min_refl = minimum (left) threshold value at which normal is truncated
"""
from scipy.optimize import root
sol = root(findTruncMeanVarnceFncResid, [muInit, sigmaInit],
args=(truncMean,truncVarnce,min_refl), jac=False, method='hybr')
mu = sol.x[0]
sigma = sol.x[1]
return (mu, sigma)
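# Illustrative usage sketch (hedged): recovering the parameters of a normal distribution
# from the mean/variance of its left-truncated part. The numbers below are hypothetical.
#
#   mu, sigma = findTruncNormalRoots(truncMean=12.0, truncVarnce=4.0,
#                                    muInit=10.0, sigmaInit=2.0, min_refl=9.0)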
def findTruncMeanVarnceFncResid(x,truncMean,truncVarnce,min_refl):
"""Evaluates the residual of the function that we want to zero
in order to solve for the parameters of a truncated normal distribution.
Inputs:
x[0] = mean of untruncated (full) normal
x[1] = standard deviation of untruncated normal
truncMean = mean of truncated part of full normal
truncVarnce = variance of truncated part of full normal
min_refl = minimum (left) threshold value at which normal is truncated
Output:
Residual of the function to be zeroed
"""
mu = x[0]
sigma = x[1]
alpha = (min_refl-mu)/sigma
lambdaAlpha = lambdaFnc(alpha)
truncMeanResid = mu + sigma * lambdaAlpha - truncMean
truncVarnceResid = sigma**2 * ( 1.0 - deltaFnc(alpha,lambdaAlpha) ) - truncVarnce
return (truncMeanResid, truncVarnceResid)
def lambdaFnc(alpha):
"""A utility function used to relate moments and parameters of a truncated normal."""
from scipy.stats import norm
from numpy import finfo, amax
return norm.pdf(alpha) / amax([ 1.0 - norm.cdf(alpha) , finfo(float).eps ])
def deltaFnc(alpha,lambdaAlpha):
"""A utility function used to relate variance and sigma parameter of a truncated normal."""
return lambdaAlpha * ( lambdaAlpha - alpha )
def findKSDn(cdf1, cdf2):
"""Computes the Kolmogorov-Smirnov statistic, Dn.
Used to determine if two distributions are different.
Inputs:
cdf1 = first cumulative distribution function
cdf2 = second cumulative distribution function
Output:
Dn = KS statistic"""
from numpy import abs, amax
Dn = amax( abs( cdf1 - cdf2 ) )
return Dn
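# Illustrative usage sketch (hedged): comparing two empirical CDFs evaluated on a common
# grid; `data1`/`data2` are hypothetical sample vectors and numpy is assumed imported as np.
#
#   import numpy as np
#   grid = np.linspace(0.0, 1.0, 100)
#   cdf1 = np.array([np.mean(data1 <= g) for g in grid])
#   cdf2 = np.array([np.mean(data2 <= g) for g in grid])
#   Dn = findKSDn(cdf1, cdf2)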
def calcMeanAlbedo(reflCloudBlockFilled, meanReflCloudBlock, meanLWP):
"""Calculate mean albedo of cloud layer, which depends on vertical overlap.
Inputs:
reflCloudBlockFilled = Block of cloud reflectivity values - minimum reflectivity.
Out-of-cloud values are filled with 0.
Each column is an altitude level.
Each row is a vertical profile at a different time.
meanReflCloudBlock = within-cloud mean of reflCloudBlock
meanLWP = within-cloud mean of LWP
Output:
meanAlbedo = average reflectivity of cloud layer"""
from numpy import std, sqrt, mean, sum
import pdb
# Sum reflectivity in vertical
sumReflCloudBlockFilled = sum(reflCloudBlockFilled,axis=1)
# LWP = Liquid water path. Assume it is linearly proportional to refl.
LWP = meanLWP * sumReflCloudBlockFilled / meanReflCloudBlock
# tau = optical depth
# According to Brenguier et al. (2011), tau ~= 0.15 LWP,
# where LWP is in g/m**2.
# According to Hartmann et al. (1992), tau ~= 9 for medium clouds
tau = 0.15 * LWP
# albedo = cloud reflectivity, 0 <= albedo <= 1
albedo = tau / (9.0 + tau)
meanAlbedo = mean(albedo)
#pdb.set_trace()
return (meanAlbedo, LWP) | gpl-2.0 |
arjoly/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer;
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
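# One possible solution sketch for the TASKs above (hedged; the hyper-parameters are
# illustrative choices, not prescribed by the exercise): character 1-3 grams fed into
# a TF-IDF vectorizer followed by a Perceptron.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=True)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)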
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
macks22/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
vladpopovici/WSItk | WSItk/descriptors/txtgrey.py | 2 | 28559 | """
DESCRIPTORS.TXTGREY: textural descriptors from grey-scale images.
@author: vlad
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
__version__ = 0.05
__author__ = 'Vlad Popovici'
__all__ = ['GaborDescriptor', 'LBPDescriptor', 'GLCMDescriptor', 'HOGDescriptor',
'HistDescriptor', 'HaarLikeDescriptor', 'MFSDescriptor', 'StatsDescriptor']
# from abc import ABCMeta, abstractmethod
import numpy as np
from numpy import dot
from scipy.stats.mstats import kurtosis, skew
from matplotlib.cbook import flatten
from future.utils import bytes_to_native_str as nstr
from scipy import ndimage as nd
from scipy.linalg import norm
from scipy.stats import entropy
from scipy.signal import convolve2d
from skimage.filters import gabor_kernel
from skimage.util import img_as_float
from skimage.feature.texture import greycoprops, greycomatrix, \
local_binary_pattern
from skimage.exposure import rescale_intensity
from skimage.feature import hog
# from skimage.transform import integral_image
from .basic import *
class GaborDescriptor(LocalDescriptor):
"""
Computes Gabor descriptors from an image. These descriptors are the means
and variances of the filter responses obtained by convolving an image with
a bank of Gabor filters.
"""
name = nstr(b'gabor')
def __init__(self, theta=np.array([0.0, np.pi / 4.0, np.pi / 2.0, 3.0 * np.pi / 4.0],
dtype=np.double),
freq=np.array([3.0 / 4.0, 3.0 / 8.0, 3.0 / 16.0], dtype=np.double),
sigma=np.array([1.0, 2 * np.sqrt(2.0)], dtype=np.double),
normalized=True):
"""
Initialize the Gabor kernels (only real part).
Args:
theta: numpy.ndarray (vector)
Contains the orientations of the filter; defaults to [0, pi/4, pi/2, 3*pi/4].
freq: numpy.ndarray (vector)
The frequencies of the Gabor filter; defaults to [3/4, 3/8, 3/16].
sigma: numpy.ndarray (vector)
The sigma parameter for the Gaussian smoothing filter; defaults to [1, 2*sqrt(2)].
normalized: bool
If true, the kernels are normalized
"""
self.kernels_ = [np.real(gabor_kernel(frequency=f, theta=t, sigma_x=s,
sigma_y=s))
for f in freq for s in sigma for t in theta]
if normalized:
for k, krn in enumerate(self.kernels_):
self.kernels_[k] = krn / np.sqrt((krn ** 2).sum())
return
def compute(self, image):
"""
Compute the Gabor descriptors on the given image.
Args:
image: numpy.ndarray (.ndim=2)
Grey scale image.
Returns:
numpy.ndarray (vector) containing the Gabor descriptors (means followed
by the variances of the filter responses)
"""
        nk = len(self.kernels_)
        ft = np.zeros(2 * nk, dtype=np.double)
        try:
            image = img_as_float(image)
            for k, krn in enumerate(self.kernels_):
                flt = nd.convolve(image, krn, mode='wrap')
                ft[k] = flt.mean()
                ft[k + nk] = flt.var()
        except:
            # keep the (zero-filled) feature vector defined even if filtering fails
            print("Error in GaborDescriptor.compute()")
        return ft
@staticmethod
def dist(ft1, ft2, method='euclidean'):
"""
Compute the distance between two sets of Gabor features. Possible distance types
are:
-euclidean
-cosine distance: this is not a proper distance!
"""
dm = {'euclidean': lambda x_, y_: norm(x_ - y_),
'cosine': lambda x_, y_: dot(x_, y_) / (norm(x_) * norm(y_))
}
method = method.lower()
if method not in dm:
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
## end class GaborDescriptors
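# Illustrative usage sketch (hedged; `img` below is a hypothetical 2D grey-scale
# numpy array, e.g. loaded with skimage.io.imread(..., as_grey=True)):
#
#   gd = GaborDescriptor()                  # default bank: 4 orientations x 3 freqs x 2 sigmas
#   ft = gd.compute(img)                    # vector of 2 * n_kernels values (means, then variances)
#   d = GaborDescriptor.dist(ft, gd.compute(img), method='euclidean')   # 0.0 for identical inputs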
class GLCMDescriptor(LocalDescriptor):
"""
Grey Level Co-occurrence Matrix: the image is decomposed into a number of
non-overlapping regions, and the GLCM features are computed on each of these
regions.
"""
name = nstr(b'glcm')
def __init__(self, wsize, dist=0.0, theta=0.0, levels=256, which=None,
symmetric=True, normed=True):
"""
Initialize GLCM.
Args:
wsize: uint
window size: the image is decomposed into small non-overlapping regions of size
<wsize x wsize> from which the GLCMs are computed. If the last region in a row or
the last row in an image are smaller than the required size, then they are not
used in computing the features.
dist: uint
pair distance
theta: float
pair angle
levels: uint
number of grey levels
which: string
which features to be computed from the GLCM. See the help for
skimage.feature.texture.greycoprops for details
symmetric: bool
consider symmetric pairs?
normed: bool
normalize the co-occurrence matrix, before computing the features?
"""
self.wsize_ = wsize
self.dist_ = dist
self.theta_ = theta
self.levels_ = levels
if which is None:
which = ['dissimilarity', 'correlation']
self.which_feats_ = [w.lower() for w in which]
self.symmetric_ = symmetric
self.normed_ = normed
return
def compute(self, image):
"""
Compute the GLCM features.
"""
assert (image.ndim == 2)
        h, w = image.shape  # shape is (rows, cols), so h indexes rows and w indexes columns
nw = int(w / self.wsize_)
nh = int(h / self.wsize_)
nf = len(self.which_feats_)
ft = np.zeros((nf, nw * nh)) # features will be on rows
k = 0
for x in np.arange(0, nw):
for y in np.arange(0, nh):
x0, y0 = x * self.wsize_, y * self.wsize_
x1, y1 = x0 + self.wsize_, y0 + self.wsize_
glcm = greycomatrix(image[y0:y1, x0:x1],
self.dist_, self.theta_, self.levels_,
self.symmetric_, self.normed_)
ft[:, k] = np.array([greycoprops(glcm, f)[0, 0] for f in self.which_feats_])
k += 1
res = {}
k = 0
for f in self.which_feats_:
res[f] = ft[k, :]
k += 1
return res
@staticmethod
def dist(ft1, ft2, method='bh'):
"""
Computes the distance between two sets of GLCM features. The features are
assumed to have been computed using the same parameters. The distance is
based on comparing the distributions of these features.
Args:
ft1, ft2: dict
each dictionary contains for each feature a vector of values computed
from the images
method: string
the method used for computing the distance between the histograms of features:
'kl' - Kullback-Leibler divergence (symmetrized by 0.5*(KL(p,q)+KL(q,p))
'js' - Jensen-Shannon divergence: 0.5*(KL(p,m)+KL(q,m)) where m=(p+q)/2
'bh' - Bhattacharyya distance: -log(sqrt(sum_i (p_i*q_i)))
'ma' - Matusita distance: sqrt(sum_i (sqrt(p_i)-sqrt(q_i))**2)
Returns:
dict
a dictionary with distances computed between pairs of features
"""
# distance methods
dm = {'kl': lambda x_, y_: 0.5 * (entropy(x_, y_) + entropy(y_, x_)),
'js': lambda x_, y_: 0.5 * (entropy(x_, 0.5 * (x_ + y_)) + entropy(y_, 0.5 * (x_ + y_))),
'bh': lambda x_, y_: -np.log(np.sum(np.sqrt(x_ * y_))),
'ma': lambda x_, y_: np.sqrt(np.sum((np.sqrt(x_) - np.sqrt(y_)) ** 2))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
res = {}
for k in ft1.keys():
if k in ft2.keys():
# build the histograms:
mn = min(ft1[k].min(), ft2[k].min())
mx = max(ft1[k].max(), ft2[k].max())
h1, _ = np.histogram(ft1[k], normed=True, bins=10, range=(mn, mx))
h2, _ = np.histogram(ft2[k], normed=True, bins=10, range=(mn, mx))
res[k] = dm[method](h1, h2)
return res
# end class GLCMDescriptors
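# Illustrative usage sketch (hedged; note that skimage's greycomatrix() expects
# list-like distances/angles, so 1-element lists are shown instead of the scalar
# defaults; `img` is a hypothetical 2D uint8 grey-scale image):
#
#   glcm = GLCMDescriptor(wsize=32, dist=[1], theta=[0.0], levels=256)
#   feats = glcm.compute(img)               # dict: feature name -> vector (one value per window)
#   d = GLCMDescriptor.dist(feats, glcm.compute(img), method='bh')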
class LBPDescriptor(LocalDescriptor):
"""
Local Binary Pattern for texture description. A LBP descriptor set is a
histogram of LBPs computed from the image.
"""
name = nstr(b'lbp')
def __init__(self, radius=3, npoints=None, method='uniform'):
"""
Initialize a LBP descriptor set. See skimage.feature.texture.local_binary_pattern
for details on the meaning of parameters.
Args:
radius: int
defaults to 3
npoints: int
defaults to None. If None, npoints is set to 8*radius
method: string
defaults to 'uniform'. Could be 'uniform', 'ror', 'var', 'nri_uniform'
"""
self.radius_ = radius
self.npoints_ = radius * 8 if npoints is None else npoints
self.method_ = method.lower()
self.nhbins_ = self.npoints_ + 2
return
def compute(self, image):
"""
Compute the LBP features. These features are returned as histograms of
LBPs.
"""
        hist = np.zeros(self.nhbins_)
        try:
            lbp = local_binary_pattern(image, self.npoints_, self.radius_, self.method_)
            hist, _ = np.histogram(lbp, normed=True, bins=self.nhbins_, range=(0, self.nhbins_))
        except:
            # keep the (zero-filled) histogram defined even if LBP computation fails
            print("Error in LBPDescriptor.compute()")
        return hist
@staticmethod
def dist(ft1, ft2, method='bh'):
"""
Computes the distance between two sets of LBP features. The features are
assumed to have been computed using the same parameters. The features
are represented as histograms of LBPs.
Args:
ft1, ft2: numpy.ndarray (vector)
histograms of LBPs as returned by compute()
method: string
the method used for computing the distance between the two sets of features:
'kl' - Kullback-Leibler divergence (symmetrized by 0.5*(KL(p,q)+KL(q,p))
'js' - Jensen-Shannon divergence: 0.5*(KL(p,m)+KL(q,m)) where m=(p+q)/2
'bh' - Bhattacharyya distance: -log(sqrt(sum_i (p_i*q_i)))
'ma' - Matusita distance: sqrt(sum_i (sqrt(p_i)-sqrt(q_i))**2)
"""
# distance methods
dm = {'kl': lambda x_, y_: 0.5 * (entropy(x_, y_) + entropy(y_, x_)),
'js': lambda x_, y_: 0.5 * (entropy(x_, 0.5 * (x_ + y_)) + entropy(y_, 0.5 * (x_ + y_))),
'bh': lambda x_, y_: -np.log(np.sum(np.sqrt(x_ * y_))),
'ma': lambda x_, y_: np.sqrt(np.sum((np.sqrt(x_) - np.sqrt(y_)) ** 2))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
# end class LBPDescriptors
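# Illustrative usage sketch (hedged; `img` is a hypothetical 2D grey-scale array):
#
#   lbp = LBPDescriptor(radius=3)           # npoints defaults to 8 * radius = 24
#   h1 = lbp.compute(img)                   # histogram with npoints + 2 = 26 bins
#   d = LBPDescriptor.dist(h1, h1, method='bh')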
# MFSDescriptors - Multi-Fractal Dimensions
class MFSDescriptor(LocalDescriptor):
"""
Multi-Fractal Dimensions for texture description.
Adapted from IMFRACTAL project at https://github.com/rbaravalle/imfractal
"""
name = nstr(b'fract')
def __init__(self, _nlevels_avg=1, _wsize=15, _niter=1):
"""
Initialize an MFDDescriptors object.
Arguments:
_nlevels_avg: number of levels to be averaged in density computation (uint)
=1: no averaging
_wsize: size of the window for computing descriptors (uint)
_niter: number of iterations
"""
self.nlevels_avg = _nlevels_avg
self.wsize = _wsize
self.niter = _niter
return
def compute(self, im):
"""
Computes MFS over the given image.
Arguments:
im: image (grey-scale) (numpy.ndarray)
Returns:
a vector of descriptors (numpy.array)
"""
        ## TODO: this needs much polishing to make it run faster!
assert (im.ndim == 2)
# Using [0..255] to denote the intensity profile of the image
grayscale_box = [0, 255]
# Preprocessing: default intensity value of image ranges from 0 to 255
if abs(im).max() < 1:
im = rescale_intensity(im, out_range=(0, 255))
#######################
### Estimating density function of the image
### by solving least squares for D in the equation
### log10(bw) = D*log10(c) + b
r = 1.0 / max(im.shape)
c = np.log10(r * np.arange(start=1, stop=self.nlevels_avg + 1))
bw = np.zeros((self.nlevels_avg, im.shape[0], im.shape[1]), dtype=np.float32)
bw[0, :, :] = im + 1
def _gauss_krn(size):
""" Returns a normalized 2D gauss kernel array for convolutions """
if size <= 3:
sigma = 1.5
else:
sigma = size / 2.0
y, x = np.mgrid[-(size - 1.0) / 2.0:(size - 1.0) / 2.0 + 1, -(size - 1.0) / 2.0:(size - 1.0) / 2.0 + 1]
s2 = 2.0 * sigma ** 2
g = np.exp(-(x ** 2 + y ** 2) / s2)
return g / g.sum()
k = 1
if self.nlevels_avg > 1:
bw[1, :, :] = convolve2d(bw[0, :, :], _gauss_krn(k + 1), mode="full")[1:, 1:] * ((k + 1) ** 2)
for k in np.arange(2, self.nlevels_avg):
temp = convolve2d(bw[0, :, :], _gauss_krn(k + 1), mode="full") * ((k + 1) ** 2)
if k == 4:
bw[k] = temp[k - 1 - 1:temp.shape[0] - (k / 2), k - 1 - 1:temp.shape[1] - (k / 2)]
else:
bw[k] = temp[k - 1:temp.shape[0] - (1), k - 1:temp.shape[1] - (1)]
bw = np.log10(bw)
n1 = np.sum(c ** 2)
n2 = bw[0] * c[0]
for k in np.arange(1, self.nlevels_avg):
n2 += bw[k] * c[k]
sum3 = np.sum(bw, axis=0)
if self.nlevels_avg > 1:
D = (n2 * self.nlevels_avg - c.sum() * sum3) / (n1 * self.nlevels_avg - c.sum() ** 2)
min_D, max_D = 1.0, 4.0
D = grayscale_box[1] * (D - min_D) / (max_D - min_D) + grayscale_box[0]
else:
D = im
D = D[self.nlevels_avg - 1:D.shape[0] - self.nlevels_avg + 1,
self.nlevels_avg - 1:D.shape[1] - self.nlevels_avg + 1]
IM = np.zeros(D.shape)
gap = np.ceil((grayscale_box[1] - grayscale_box[0]) / np.float32(self.wsize))
center = np.zeros(self.wsize)
for k in np.arange(1, self.wsize + 1):
bin_min = (k - 1) * gap
bin_max = k * gap - 1
center[k - 1] = round((bin_min + bin_max) / 2.0)
D = ((D <= bin_max) & (D >= bin_min)).choose(D, center[k - 1])
D = ((D >= bin_max)).choose(D, 0)
D = ((D < 0)).choose(D, 0)
IM = D
# Constructing the filter for approximating log fitting
r = max(IM.shape)
c = np.zeros(self.niter)
c[0] = 1;
for k in range(1, self.niter):
c[k] = c[k - 1] / (k + 1)
c = c / sum(c);
# Construct level sets
Idx_IM = np.zeros(IM.shape);
for k in range(0, self.wsize):
IM = (IM == center[k]).choose(IM, k + 1)
Idx_IM = IM
IM = np.zeros(IM.shape)
# Estimate MFS by box-counting
num = np.zeros(self.niter)
MFS = np.zeros(self.wsize)
for k in range(1, self.wsize + 1):
IM = np.zeros(IM.shape)
IM = (Idx_IM == k).choose(Idx_IM, 255 + k)
IM = (IM < 255 + k).choose(IM, 0)
IM = (IM > 0).choose(IM, 1)
temp = max(IM.sum(), 1)
num[0] = np.log10(temp) / np.log10(r);
for j in range(2, self.niter + 1):
mask = np.ones((j, j))
bw = convolve2d(IM, mask, mode="full")[1:, 1:]
indx = np.arange(0, IM.shape[0], j)
indy = np.arange(0, IM.shape[1], j)
bw = bw[np.ix_(indx, indy)]
idx = (bw > 0).sum()
temp = max(idx, 1)
num[j - 1] = np.log10(temp) / np.log10(r / j)
MFS[k - 1] = sum(c * num)
return MFS
@staticmethod
def dist(ft1, ft2, method='euclidean'):
"""
Compute the distance between two sets of multifractal dimension features.
Possible distance types are:
-Euclidean
-cosine distance: this is not a proper distance!
"""
assert (ft1.ndim == ft2.ndim == 1)
assert (ft1.size == ft2.size)
dm = {'euclidean': lambda x_, y_: norm(x_ - y_),
'cosine': lambda x_, y_: dot(x_, y_) / (norm(x_) * norm(y_))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
# end class MFSDescriptors
class HOGDescriptor(LocalDescriptor):
"""
Provides local descriptors in terms of histograms of oriented gradients.
"""
name = nstr(b'hog')
def __init__(self, _norient=9, _ppc=(128, 128), _cpb=(4, 4)):
"""
Initialize an HOGDescriptors object. For details see the HOG
        descriptor in scikit-image package:
skimage.feature.hog
:param _norient: uint
number of orientations of the gradients
:param _ppc: uint
pixels per cell
:param _cpb: uint
cells per block
"""
self.norient = _norient
self.ppc = _ppc
self.cpb = _cpb
return
def compute(self, image):
"""
Computes HOG on a given image.
:param image: numpy.ndarray
:return: numpy.ndarray
a vector of features
"""
r = hog(image, pixels_per_cell=self.ppc, cells_per_block=self.cpb,
visualise=False, normalise=False)
return r
@staticmethod
def dist(ft1, ft2, method='euclidean'):
"""
Compute the distance between two sets of HOG features. Possible distance types
are:
-Euclidean
-cosine distance: this is not a proper distance!
"""
dm = {'euclidean': lambda x_, y_: norm(x_ - y_),
'cosine': lambda x_, y_: dot(x_, y_) / (norm(x_) * norm(y_))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
# end HOGDescriptors
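# Hedged usage sketch (not part of the original module): how HOGDescriptor might be
# applied to a small grey-level patch. The patch, cell and block sizes below are
# illustrative assumptions, not values taken from this repository.
def _example_hog_usage():
    """Compute HOG features on a random 64x64 patch and a trivial distance."""
    patch = np.random.rand(64, 64)
    desc = HOGDescriptor(_norient=9, _ppc=(16, 16), _cpb=(2, 2))
    feats = desc.compute(patch)              # 1-D vector of oriented-gradient histograms
    return HOGDescriptor.dist(feats, feats)  # 0.0: identical feature vectors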
class HistDescriptor(LocalDescriptor):
"""
Provides local descriptors in terms of histograms of grey levels.
"""
name = nstr(b'hist')
def __init__(self, _interval=(0, 1), _nbins=10):
"""
Initialize an HistDescriptors object: a simple histogram of
grey-levels
:param _interval: tuple
the minimum and maximum values to be accounted for
:param _nbins: uint
number of bins in the histogram
"""
self.interval = _interval
self.nbins = _nbins
return
def compute(self, image):
"""
Computes the histogram on a given image.
:param image: numpy.ndarray
:return: numpy.ndarray
a vector of frequencies
"""
if image.ndim != 2:
raise ValueError("Only grey-level images are supported")
h, _ = np.histogram(image, normed=True, bins=self.nbins, range=self.interval)
return h
@staticmethod
def dist(ft1, ft2, method='bh'):
"""
Computes the distance between two sets of histogram features.
Args:
ft1, ft2: numpy.ndarray (vector)
histograms as returned by compute()
method: string
the method used for computing the distance between the two sets of features:
                'kl' - Kullback-Leibler divergence (symmetrized as 0.5*(KL(p,q)+KL(q,p)))
'js' - Jensen-Shannon divergence: 0.5*(KL(p,m)+KL(q,m)) where m=(p+q)/2
'bh' - Bhattacharyya distance: -log(sqrt(sum_i (p_i*q_i)))
'ma' - Matusita distance: sqrt(sum_i (sqrt(p_i)-sqrt(q_i))**2)
"""
# distance methods
dm = {'kl': lambda x_, y_: 0.5 * (entropy(x_, y_) + entropy(y_, x_)),
'js': lambda x_, y_: 0.5 * (entropy(x_, 0.5 * (x_ + y_)) + entropy(y_, 0.5 * (x_ + y_))),
'bh': lambda x_, y_: -np.log(np.sum(np.sqrt(x_ * y_))),
'ma': lambda x_, y_: np.sqrt(np.sum((np.sqrt(x_) - np.sqrt(y_)) ** 2))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
# end HistDescriptors
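# Hedged usage sketch (illustrative only): grey-level histograms of two random
# patches compared with the Bhattacharyya distance defined above.
def _example_hist_usage():
    """Histogram descriptor + Bhattacharyya distance on two toy patches."""
    desc = HistDescriptor(_interval=(0, 1), _nbins=10)
    h1 = desc.compute(np.random.rand(32, 32))
    h2 = desc.compute(np.random.rand(32, 32))
    return HistDescriptor.dist(h1, h2, method='bh')  # close to 0 for similar distributions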
# Haar-like descriptors
class HaarLikeDescriptor(LocalDescriptor):
"""
    Provides local descriptors in terms of responses to a series of Haar-like
features [1]_.
The coding is inspired by HaarLikeFeature class from SimpleCV (www.simplecv.org).
.. [1] http://en.wikipedia.org/wiki/Haar-like_features
"""
name = nstr(b'haar')
def __init__(self, _haars, _norm=True):
"""
Initialize an HaarLikeDescriptors object.
:param _haars: list
a list of feature descriptors. A feature descriptor is a list of points (row, column) in a normalized
coordinate system ((0,0) -> (1,1)) describing the "positive" (black) patches from a Haar-like
feature. All the patches not specified in this list are considered "negative" (white).
The value corresponding to such a feature is the (weighted) sum of pixel intensities covered by
"positive" patches from which the (weighted) sum of pixel intensities covered by "negative" patches
is subtracted.
See some examples at:
- http://www.codeproject.com/Articles/27125/Ultra-Rapid-Object-Detection-in-Computer-Vision-Ap
- http://en.wikipedia.org/wiki/Haar-like_features
Examples of Haar-like features coding:
- a Haar-like feature in which the left side is "positive" (*) and the right side "negative" (.):
+-------+-------+
|*******|.......|
|*******|.......|
|*******|.......|
|*******|.......|
|*******|.......|
|*******|.......|
+-------+-------+
The corresponding coding is: [[(0.0, 0.0), (0.5, 1.0)]].
- a Haar-like feature with diagonal "positive" (*) patches:
+-------+-------+
|*******|.......|
|*******|.......|
|*******|.......|
+-------+-------+
|.......|*******|
|.......|*******|
|.......|*******|
+-------+-------+
The corresponding coding is: [[(0.0, 0.0), (0.5, 0.5)], [(0.5, 0.5), (1.0, 1.0)]].
:param _norm: boolean
Should the features be normalized? (scale-independent?) Default: True
"""
self.haars = _haars
self.nfeats = len(_haars)
self.norm = _norm
# Check that all coordinates are between 0 and 1:
if any([_p < 0.0 or _p > 1.0 for _p in flatten(_haars)]):
raise ValueError("Improper Haar feature specification.")
return
def compute(self, image):
"""
Computes the Haar-like descriptors on an INTEGRAL image.
:param image: numpy.ndarray
This must be the integral image, as computed by skimage.transform.integral_image(),
for example. This format does not contain the first row and column of 0s.
        Note: if the ``_norm`` flag passed to the constructor is True, the
            features are normalized by the image area.
:return: numpy.ndarray
a vector of feature values (one per Haar-like feature)
"""
if image.ndim != 2:
raise ValueError("Only grey-level images are supported")
h, w = image.shape
h -= 1
w -= 1
nrm_fact = h * w if self.norm else 1.0
f = np.zeros(self.nfeats, dtype=np.float)
i = 0
S0 = image[h, w] + image[0, 0] - image[h, 0] - image[0, w] # integral over the image
for hr in self.haars: # for each Haar-like feature
S = 0L # will contain the sum of positive patches in the feature
for p in hr: # for each patch in the current feature
                a, b = p  # normalized (row, col) corners of the patch: a = top-left, b = bottom-right
                row_a = np.int(np.floor(a[0] * h))
                col_a = np.int(np.floor(a[1] * w))
                row_b = np.int(np.floor(b[0] * h))
                col_b = np.int(np.floor(b[1] * w))
S += image[row_b, col_b] + image[row_a, col_a] - image[row_b, col_a] - image[row_a, col_b]
# The final value of the Haar-like feature is the sum of positive patches minus
# the sum of negative patches. Since everything that is not specified as positive
# patch is considered negative, the sum of the negative patches is the total sum
# in the image (corner bottom-right in the integral image) minus the sum of positive
# ones. Hence, the value of the Haar-like feature is 2*S - S0
f[i] = (2.0 * S - S0) / nrm_fact
i += 1
return f
@staticmethod
def dist(ft1, ft2, method='euclidean'):
"""
Computes the distance between two Haar-like feature vectors.
:param ft1: a vector of features
:type ft1: numpy.array (1xn)
:param ft2: a vector of features
:type ft2: numpy.array (1xn)
:param method: the method for computing the distance
:type method: string
:return: a distance
:rtype: float
"""
dm = {'euclidean': lambda x_, y_: norm(x_ - y_),
'cosine': lambda x_, y_: dot(x_, y_) / (norm(x_) * norm(y_))
}
method = method.lower()
if method not in dm.keys():
raise ValueError('Unknown method')
return dm[method](ft1, ft2)
@staticmethod
def haars1():
"""
        Generates a list of standard Haar-like feature specifications
        (see __init__ for the coding convention).
        :return: a list of feature specifications
        :rtype: list
"""
h = [
[[(0.0, 0.0), (0.5, 0.5)], [(0.5, 0.5), (1.0, 1.0)]], # diagonal blocks
[[(0.0, 0.0), (1.0, 0.5)]], # vertical edge
[[(0.0, 0.0), (0.5, 1.0)]], # horizontal edge
[[(0.0, 0.33), (1.0, 0.67)]], # vertical central band
[[(0.33, 0.0), (0.67, 1.0)]], # horizontal central band
[[(0.25, 0.25), (0.75, 0.75)]]
]
return h
# end HaarLikeDescriptor
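# Hedged usage sketch (illustrative only): Haar-like responses on an integral image.
# skimage.transform.integral_image is assumed to be available; compute() expects the
# integral image, not the raw grey-level image.
def _example_haar_usage():
    """Evaluate the standard haars1() features on a toy image."""
    from skimage.transform import integral_image
    desc = HaarLikeDescriptor(HaarLikeDescriptor.haars1())
    img = np.random.rand(64, 64)                # toy grey-level image
    return desc.compute(integral_image(img))    # one response per Haar-like feature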
# Summary statistics descriptor
class StatsDescriptor(LocalDescriptor):
"""
A very simple local descriptor based on the first moments
statistics.
"""
name = nstr(b'stats')
def __init__(self, stats=None):
self._statsfn = {
'mean': lambda x_: x_.mean(),
'std': lambda x_: x_.std(),
'kurtosis': lambda x_: kurtosis(x_, axis=None, fisher=True),
'skewness': lambda x_: skew(x_, axis=None, bias=True)
}
if stats is None:
self.stats = ['mean', 'std']
else:
self.stats = [s.lower() for s in stats]
for s in self.stats:
if s not in self._statsfn:
raise ValueError('Unknown summary statistic')
def compute(self, image):
return np.array([self._statsfn[s](image) for s in self.stats])
@staticmethod
def dist(ft1, ft2, method='euclidean'):
"""
Computes the distance between two Stats feature vectors.
:param ft1: a vector of features
:type ft1: numpy.array (1xn)
:param ft2: a vector of features
:type ft2: numpy.array (1xn)
        :param method: ignored; the Euclidean distance is always used
        :type method: string
:return: a distance
:rtype: float
"""
return norm(ft1 - ft2)
# end StatsDescriptor
| mit |
fsimkovic/conkit | conkit/plot/tools.py | 1 | 6281 | # BSD 3-Clause License
#
# Copyright (c) 2016-18, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Internal utility functions"""
__author__ = "Felix Simkovic"
__date__ = "16 Feb 2017"
__version__ = "0.1"
import numpy as np
from conkit.core.contact import Contact
from conkit.core.contactmap import ContactMap
from conkit.core.contactfile import ContactFile
from conkit.core.sequence import Sequence
from conkit.core.sequencefile import SequenceFile
from conkit.misc import deprecate
HierarchyIndex = {
'Contact': Contact,
'ContactMap': ContactMap,
'ContactFile': ContactFile,
'Sequence': Sequence,
'SequenceFile': SequenceFile
}
class ColorDefinitions(object):
"""A class storing all color definitions for the various plots
for fast and easy handling
"""
GENERAL = '#000000'
MATCH = '#0F0B2C'
MISMATCH = '#DC4869'
STRUCTURAL = '#D8D6D6'
L5CUTOFF = '#3F4587'
L20CUTOFF = '#B5DD2B'
PRECISION50 = L5CUTOFF
FACTOR1 = L20CUTOFF
AA_ENCODING = {
'A': '#882D17',
'C': '#F3C300',
'D': '#875692',
'E': '#F38400',
'F': '#A1CAF1',
'G': '#BE0032',
'H': '#C2B280',
'I': '#848482',
'K': '#008856',
'L': '#E68FAC',
'M': '#0067A5',
'N': '#F99379',
'P': '#604E97',
'Q': '#F6A600',
'R': '#B3446C',
'S': '#DCD300',
'T': '#8DB600',
'V': '#654522',
'W': '#E25822',
'Y': '#2B3D26',
'X': '#000000'
}
def find_minima(data, order=1):
"""Find the minima in a 1-D list
Parameters
----------
data : list, tuple
A list of values
order : int, optional
The order, i.e. number of points next to point to consider
Returns
-------
list
A list of indices for minima
Warning
-------
For multi-dimensional problems, see :func:`~scipy.signal.argrelmin`.
Raises
------
:exc:`ValueError`
Order needs to be >= 1!
:exc:`ValueError`
More than two elements required!
"""
if order < 1:
raise ValueError("Order needs to be >= 1!")
data = np.asarray(data)
nelements = data.shape[0]
if nelements < 2:
raise ValueError("More than two elements required!")
results = np.zeros(nelements, dtype=np.bool_)
for i in np.arange(1, nelements - 1):
start = 0 if i - order < 0 else i - order
end = nelements if i + order + 1 > nelements else i + order + 1
results[i] = np.all(data[start:i] > data[i]) and np.all(data[i] < data[i + 1:end])
return np.where(results)[0].tolist()
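# Hedged usage sketch: a worked example of find_minima() on a toy 1-D profile.
#
#   >>> find_minima([3, 1, 2, 0, 2, 4], order=1)
#   [1, 3]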
def get_adjusted_aspect(ax, aspect_ratio):
"""Adjust the aspect ratio
Parameters
----------
ax : :obj:`~matplotlib.axes.Axes`
A :obj:`~matplotlib.axes.Axes` instance
aspect_ratio : float
The desired aspect ratio for :obj:`~matplotlib.axes.Axes`
Returns
-------
float
The required aspect ratio to achieve the desired one
Warning
-------
This function only works for non-logarithmic axes.
"""
default_ratio = (ax.get_xlim()[1] - ax.get_xlim()[0]) / (ax.get_ylim()[1] - ax.get_ylim()[0])
return float(default_ratio * aspect_ratio)
@deprecate('0.11', msg='Use get_points_on_circle instead')
def points_on_circle(*args, **kwargs):
return get_points_on_circle(*args, **kwargs)
def get_points_on_circle(radius, h=0, k=0):
"""Calculate points on a circle with even spacing
Parameters
----------
radius : int
The radius of the circle
h : int, optional
The x coordinate of the origin
k : int, optional
The y coordinate of the origin
Returns
-------
list
The list of coordinates for each point
"""
if radius == 0:
return [[]]
else:
space = 2 * np.pi / radius
coords = np.zeros((radius, 2))
for i in np.arange(radius):
coords[i] = [round(h + radius * np.cos(space * i), 6), round(k + radius * np.sin(space * i), 6)]
return coords.tolist()
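# Hedged usage sketch: note that `radius` also sets the number of points returned.
#
#   >>> get_points_on_circle(2)
#   [[2.0, 0.0], [-2.0, 0.0]]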
def get_radius_around_circle(p1, p2):
"""Obtain the radius around a given circle
Parameters
----------
p1 : list, tuple
Point 1
p2 : list, tuple
Point 2 adjacent `p1`
Returns
-------
float
The radius for points so p1 and p2 do not intersect
"""
dist = np.linalg.norm(np.array(p1) - np.array(p2))
return dist / 2.0 - dist * 0.1
def _isinstance(hierarchy, hierarchy_type):
"""Confirm the data structure to be a ConKit definition"""
if isinstance(hierarchy_type, str) and hierarchy_type in HierarchyIndex:
return isinstance(hierarchy, HierarchyIndex[hierarchy_type])
else:
return isinstance(hierarchy, hierarchy_type)
| bsd-3-clause |
sebalander/sebaPhD | calibration/calibrateRegularCameraIntr.py | 2 | 10632 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 5 16:30:53 2016
calibrates using fisheye distortion model (polynomial in theta)
help in
http://docs.opencv.org/ref/master/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d&gsc.tab=0
@author: sebalander
"""
# %%
import cv2
import numpy as np
from numpy import zeros
import glob
import matplotlib.pyplot as plt
#from scipy import linalg
import poseCalibration as pc
from lmfit import minimize, Parameters
import poseRationalCalibration as rational
# %% ========== ========== RATIONAL PARAMETER HANDLING ========== ==========
def formatParametersChessIntrs(rVecs, tVecs, linearCoeffs, distCoeffs):
'''
set to vary all parameetrs
'''
params = Parameters()
for j in range(len(rVecs)):
for i in range(3):
params.add('rvec%d%d'%(j,i),
value=rVecs[j,i,0], vary=True)
params.add('tvec%d%d'%(j,i),
value=tVecs[j,i,0], vary=True)
params.add('fX', value=linearCoeffs[0,0], vary=True)
params.add('fY', value=linearCoeffs[1,1], vary=True)
params.add('cX', value=linearCoeffs[0,2], vary=True)
params.add('cY', value=linearCoeffs[1,2], vary=True)
# polynomial coeffs, grade 7
# # (k1,k2,p1,p2[,k3[,k4,k5,k6[,s1,s2,s3,s4[,τx,τy]]]])
#for i in [2,3,8,9,10,11,12,13]:
# params.add('distCoeffs%d'%i,
# value=distCoeffs[i,0], vary=False)
params.add('numDist0', value=distCoeffs[0,0], vary=True)
params.add('numDist1', value=distCoeffs[1,0], vary=True)
params.add('numDist2', value=distCoeffs[4,0], vary=True)
params.add('denomDist0', value=distCoeffs[5,0], vary=True)
params.add('denomDist1', value=distCoeffs[6,0], vary=True)
params.add('denomDist2', value=distCoeffs[7,0], vary=True)
return params
# %%
def retrieveParametersChess(params):
    # 6 extrinsic parameters per image plus 10 intrinsic ones (fX, fY, cX, cY and 6 distortion)
    n = (len(params) - 10) / 6
rvec = zeros((n,3,1))
tvec = zeros((n,3,1))
for j in range(n):
for i in range(3):
rvec[j,i,0] = params['rvec%d%d'%(j,i)].value
tvec[j,i,0] = params['tvec%d%d'%(j,i)].value
cameraMatrix = zeros((3,3))
cameraMatrix[0,0] = params['fX'].value
cameraMatrix[1,1] = params['fY'].value
cameraMatrix[0,2] = params['cX'].value
cameraMatrix[1,2] = params['cY'].value
cameraMatrix[2,2] = 1
distCoeffs = zeros((14,1))
distCoeffs[0] = params['numDist0'].value
distCoeffs[1] = params['numDist1'].value
distCoeffs[4] = params['numDist2'].value
distCoeffs[5] = params['denomDist0'].value
distCoeffs[6] = params['denomDist1'].value
distCoeffs[7] = params['denomDist2'].value
return rvec, tvec, cameraMatrix, distCoeffs
# %% change state of parameters
def setDistortionParams(params, state):
    # the rational model uses 6 distortion parameters (see formatParametersChessIntrs)
    for i in range(3):
        params['numDist%d' % i].vary = state
        params['denomDist%d' % i].vary = state
def setLinearParams(params, state):
    params['fX'].vary = state
    params['fY'].vary = state
    params['cX'].vary = state
    params['cY'].vary = state
def setExtrinsicParams(params, state):
    n = (len(params) - 10) / 6  # 6 extrinsic parameters per image, 10 intrinsic in total
for j in range(n):
for i in range(3):
params['rvec%d%d'%(j,i)].vary = state
params['tvec%d%d'%(j,i)].vary = state
# %% residual
def residualDirectChessRatio(params, fiducialPoints, corners):
    '''
    Residual between the projected fiducial points and the detected corners,
    stacked over all calibration images.
    '''
n = len(corners)
rVecs, tVecs, linearCoeffs, distCoeffs = retrieveParametersChess(params)
E = list()
for j in range(n):
projectedCorners = rational.direct(fiducialPoints,
rVecs[j],
tVecs[j],
linearCoeffs,
distCoeffs)
err = projectedCorners[:,0,:] - corners[j,:,0,:]
E.append(err)
return np.reshape(E,(n*len(fiducialPoints[0]),2))
# %%
def calibrateDirectChessRatio(fiducialPoints, corners, rVecs, tVecs, linearCoeffs, distCoeffs):
    '''
    It seems that, if this is not done in stages, there is numerical instability.
    I see it with the planar camera, which in principle should have no trouble
    fitting, and to a much greater extent with the fisheye.
    It may be worth iterating this cycle as many times as needed until it
    converges. Let's hope it converges.
    '''
params = formatParametersChessIntrs(rVecs, tVecs, linearCoeffs, distCoeffs) # generate Parameters obj
setDistortionParams(params,False)
setLinearParams(params,True)
setExtrinsicParams(params,True)
out = minimize(residualDirectChessRatio,
params,
args=(fiducialPoints,
corners),
xtol=1e-5, # Relative error in the approximate solution
ftol=1e-5, # Relative error in the desired sum of squares
maxfev=int(1e3))
'''
params = out.params
# setDistortionParams(params,False)
setLinearParams(params,True)
setExtrinsicParams(params,False)
out = minimize(residualDirectChessRatio,
params,
args=(fiducialPoints,
corners),
xtol=1e-5, # Relative error in the approximate solution
ftol=1e-5, # Relative error in the desired sum of squares
maxfev=int(1e3))
params = out.params
setDistortionParams(params,True)
setLinearParams(params,False)
# setExtrinsicParams(params,False)
out = minimize(residualDirectChessRatio,
params,
args=(fiducialPoints,
corners),
xtol=1e-5, # Relative error in the approximate solution
ftol=1e-5, # Relative error in the desired sum of squares
maxfev=int(1e3))
'''
return out
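# Hedged sketch (not in the original script): the staged fit above could be repeated,
# feeding each solution back in as the new initial condition, until the total squared
# residual stops improving. Variable names reuse those defined further down this script.
#
#   prev_cost = np.inf
#   for _ in range(10):
#       out = calibrateDirectChessRatio(fiducialPoints, corners, rVecsIni, tVecsIni,
#                                       linearCoeffsIni, distCoeffsIni)
#       cost = (out.residual ** 2).sum()
#       if prev_cost - cost < 1e-8:
#           break
#       prev_cost = cost
#       rVecsIni, tVecsIni, linearCoeffsIni, distCoeffsIni = \
#           retrieveParametersChess(out.params)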
# %%
reload(pc)
# %% LOAD DATA
### fisheye data
imagesFolder = "./resources/fishChessboard/"
extension = "*.png"
cornersFile = "./resources/fishChessboard/fishCorners.npy"
patternFile = "./resources/chessPattern.npy"
imgShapeFile = "./resources/fishImgShape.npy"
distCoeffsFile = "./resources/fishDistCoeffs.npy"
linearCoeffsFile = "./resources/fishLinearCoeffs.npy"
rvecsFile = "./resources/fishChessboard/fishRvecs.npy"
tvecsFile = "./resources/fishChessboard/fishTvecs.npy"
### ptz data
#imagesFolder = "./resources/PTZchessboard/zoom 0.0/"
#extension = "*.jpg"
#cornersFile = "./resources/PTZchessboard/zoom 0.0/ptzCorners.npy"
#patternFile = "./resources/chessPattern.npy"
#imgShapeFile = "./resources/ptzImgShape.npy"
#
#distCoeffsFile = "./resources/PTZchessboard/zoom 0.0/ptzDistCoeffs.npy"
#linearCoeffsFile = "./resources/PTZchessboard/zoom 0.0/ptzLinearCoeffs.npy"
#rvecsFile = "./resources/PTZchessboard/zoom 0.0/ptzRvecs.npy"
#tvecsFile = "./resources/PTZchessboard/zoom 0.0/ptzTvecs.npy"
corners = np.load(cornersFile).transpose((0,2,1,3))
fiducialPoints = np.load(patternFile)
imgSize = np.load(imgShapeFile)
images = glob.glob(imagesFolder+extension)
distCoeffsTrue = np.load(distCoeffsFile)
linearCoeffsTrue = np.load(linearCoeffsFile)
rVecsTrue = np.load(rvecsFile)
tVecsTrue = np.load(tvecsFile)
# %% # %% from testHomography.py
## use real data
#f = 5e2 # proposal of f, can't be estimated from homography
#
#rVecs, tVecs, Hs = pc.estimateInitialPose(fiducialPoints, corners, f, imgSize)
#
#pc.plotHomographyToMatch(fiducialPoints, corners[1:3], f, imgSize, images[1:3])
#
#pc.plotForwardHomography(fiducialPoints, corners[1:3], f, imgSize, Hs[1:3], images[1:3])
#
#pc.plotBackwardHomography(fiducialPoints, corners[1:3], f, imgSize, Hs[1:3])
# %%
model= 'rational'
f = 1e3  # the order of magnitude matters, though not too much;
# it seems to work between 1e2 and 1e3
# %% intrinsic parameters initial conditions
linearCoeffsIni = np.array([[f, 0, imgSize[1]/2], [0, f, imgSize[0]/2], [0, 0, 1]])
#distCoeffsIni = np.zeros((14, 1)) # later make this generic for all models
#k = 10 # factor by which to scale the focal length
#linearCoeffsIni = linearCoeffsTrue * [k,k,1]
distCoeffsIni = distCoeffsTrue
# %% extrinsic parameters initial conditions, from estimated homography
rVecsIni, tVecsIni, Hs = pc.estimateInitialPose(fiducialPoints, corners, linearCoeffsIni)
#rVecsIni = rVecsTrue
#tVecsIni = tVecsTrue
# %% from testposecalibration DIRECT GENERIC CALIBRATION
i=0
img = plt.imread(images[i])
imageCorners = corners[i]
rVec = rVecsIni[i]
tVec = tVecsIni[i]
linearCoeffs = linearCoeffsIni
distCoeffs = distCoeffsIni
# direct mapping with initial conditions
cornersProjected = pc.direct(fiducialPoints, rVec, tVec, linearCoeffs, distCoeffs, model)
# plot corners in image
pc.cornerComparison(img, imageCorners, cornersProjected)
# %%
# format parameters
initialParams = formatParametersChessIntrs(rVecsIni, tVecsIni, linearCoeffsIni, distCoeffsIni)
# test retrieving parameters
# n=10
# retrieveParametersChess(initialParams)
# %%
#E = residualDirectChessRatio(initialParams, fiducialPoints, corners)
out = calibrateDirectChessRatio(fiducialPoints, corners, rVecsIni, tVecsIni, linearCoeffsIni, distCoeffsIni)
out.nfev
out.message
out.lmdif_message
(out.residual**2).sum()
# %%
rVecsOpt, tVecsOpt, cameraMatrixOpt, distCoeffsOpt = retrieveParametersChess(out.params)
# %%
img = plt.imread(images[i])
imageCorners = corners[i]
rVec = rVecsOpt[i]
tVec = tVecsOpt[i]
linearCoeffs = cameraMatrixOpt
distCoeffs = distCoeffsOpt
# direct mapping with initial conditions
cornersProjected = pc.direct(fiducialPoints, rVec, tVec, linearCoeffs, distCoeffs, model)
# plot corners in image
pc.cornerComparison(img, imageCorners, cornersProjected)
# %% compare the fits. "True" corresponds to what the OpenCV chessboard calibration gives
distCoeffsTrue
distCoeffsOpt
pc.plotRationalDist(distCoeffsTrue,imgSize, linearCoeffsTrue)
pc.plotRationalDist(distCoeffsOpt,imgSize, cameraMatrixOpt)
linearCoeffsTrue
cameraMatrixOpt
rVecsTrue[i]
rVecsOpt[i]
tVecsTrue[i]
tVecsOpt[i]
np.linalg.norm(rVecsTrue[9])
np.linalg.norm(rVecsOpt[9])
# %% check why the sign is flipped in rVecs; what does it mean?
r1, r2 = rVecsTrue[1,:,0],rVecsOpt[1,:,0]
r1
r2
np.linalg.norm(r1), np.linalg.norm(r2)
distance = np.array([np.linalg.norm(rVecsTrue[j,:,0] - rVecsOpt[j,:,0]) for j in range(len(rVecsTrue))]) / np.pi / 2
# this is due to the 2*pi periodicity
plt.figure()
plt.plot(distance)
'''
But this does not mean the camera points where it should;
at least it agrees with OpenCV.
'''
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/testing/decorators.py | 2 | 10495 | from __future__ import print_function
from matplotlib.testing.noseclasses import KnownFailureTest, \
KnownFailureDidNotFailTest, ImageComparisonFailure
import os, sys, shutil
import nose
import matplotlib
import matplotlib.tests
import matplotlib.units
from matplotlib import ticker
from matplotlib import pyplot as plt
from matplotlib import ft2font
import numpy as np
from matplotlib.testing.compare import comparable_formats, compare_images, \
make_test_filename
import warnings
def knownfailureif(fail_condition, msg=None, known_exception_class=None ):
"""
    Assume a test will fail if *fail_condition* is True. *fail_condition*
    may also be False or the string 'indeterminate'.
*msg* is the error message displayed for the test.
If *known_exception_class* is not None, the failure is only known
if the exception is an instance of this class. (Default = None)
"""
# based on numpy.testing.dec.knownfailureif
if msg is None:
msg = 'Test known to fail'
def known_fail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
def failer(*args, **kwargs):
try:
# Always run the test (to generate images).
result = f(*args, **kwargs)
except Exception as err:
if fail_condition:
if known_exception_class is not None:
if not isinstance(err,known_exception_class):
# This is not the expected exception
raise
# (Keep the next ultra-long comment so in shows in console.)
raise KnownFailureTest(msg) # An error here when running nose means that you don't have the matplotlib.testing.noseclasses:KnownFailure plugin in use.
else:
raise
if fail_condition and fail_condition != 'indeterminate':
raise KnownFailureDidNotFailTest(msg)
return result
return nose.tools.make_decorator(f)(failer)
return known_fail_decorator
class CleanupTest(object):
@classmethod
def setup_class(cls):
cls.original_units_registry = matplotlib.units.registry.copy()
@classmethod
def teardown_class(cls):
plt.close('all')
matplotlib.tests.setup()
matplotlib.units.registry.clear()
matplotlib.units.registry.update(cls.original_units_registry)
warnings.resetwarnings() #reset any warning filters set in tests
def test(self):
self._func()
def cleanup(func):
name = func.__name__
func = staticmethod(func)
func.__get__(1).__name__ = '_private'
new_class = type(
name,
(CleanupTest,),
{'_func': func})
return new_class
def check_freetype_version(ver):
if ver is None:
return True
from distutils import version
if isinstance(ver, str):
ver = (ver, ver)
ver = [version.StrictVersion(x) for x in ver]
found = version.StrictVersion(ft2font.__freetype_version__)
return found >= ver[0] and found <= ver[1]
class ImageComparisonTest(CleanupTest):
@classmethod
def setup_class(cls):
CleanupTest.setup_class()
cls._func()
@staticmethod
def remove_text(figure):
figure.suptitle("")
for ax in figure.get_axes():
ax.set_title("")
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
def test(self):
baseline_dir, result_dir = _image_directories(self._func)
for fignum, baseline in zip(plt.get_fignums(), self._baseline_images):
figure = plt.figure(fignum)
for extension in self._extensions:
will_fail = not extension in comparable_formats()
if will_fail:
fail_msg = 'Cannot compare %s files on this system' % extension
else:
fail_msg = 'No failure expected'
orig_expected_fname = os.path.join(baseline_dir, baseline) + '.' + extension
if extension == 'eps' and not os.path.exists(orig_expected_fname):
orig_expected_fname = os.path.join(baseline_dir, baseline) + '.pdf'
expected_fname = make_test_filename(os.path.join(
result_dir, os.path.basename(orig_expected_fname)), 'expected')
actual_fname = os.path.join(result_dir, baseline) + '.' + extension
if os.path.exists(orig_expected_fname):
shutil.copyfile(orig_expected_fname, expected_fname)
else:
will_fail = True
fail_msg = 'Do not have baseline image %s' % expected_fname
@knownfailureif(
will_fail, fail_msg,
known_exception_class=ImageComparisonFailure)
def do_test():
if self._remove_text:
self.remove_text(figure)
figure.savefig(actual_fname)
err = compare_images(expected_fname, actual_fname,
self._tol, in_decorator=True)
try:
if not os.path.exists(expected_fname):
raise ImageComparisonFailure(
'image does not exist: %s' % expected_fname)
if err:
raise ImageComparisonFailure(
'images not close: %(actual)s vs. %(expected)s '
'(RMS %(rms).3f)'%err)
except ImageComparisonFailure:
if not check_freetype_version(self._freetype_version):
raise KnownFailureTest(
"Mismatched version of freetype. Test requires '%s', you have '%s'" %
(self._freetype_version, ft2font.__freetype_version__))
raise
yield (do_test,)
def image_comparison(baseline_images=None, extensions=None, tol=1e-3,
freetype_version=None, remove_text=False):
"""
call signature::
image_comparison(baseline_images=['my_figure'], extensions=None)
Compare images generated by the test with those specified in
    *baseline_images*, which must correspond, otherwise an
    ImageComparisonFailure exception will be raised.
Keyword arguments:
*baseline_images*: list
A list of strings specifying the names of the images generated
by calls to :meth:`matplotlib.figure.savefig`.
*extensions*: [ None | list ]
If *None*, default to all supported extensions.
Otherwise, a list of extensions to test. For example ['png','pdf'].
*tol*: (default 1e-3)
The RMS threshold above which the test is considered failed.
*freetype_version*: str or tuple
The expected freetype version or range of versions for this
test to pass.
*remove_text*: bool
Remove the title and tick text from the figure before
comparison. This does not remove other, more deliberate,
text, such as legends and annotations.
"""
if baseline_images is None:
raise ValueError('baseline_images must be specified')
if extensions is None:
# default extensions to test
extensions = ['png', 'pdf', 'svg']
def compare_images_decorator(func):
# We want to run the setup function (the actual test function
# that generates the figure objects) only once for each type
# of output file. The only way to achieve this with nose
# appears to be to create a test class with "setup_class" and
# "teardown_class" methods. Creating a class instance doesn't
# work, so we use type() to actually create a class and fill
# it with the appropriate methods.
name = func.__name__
# For nose 1.0, we need to rename the test function to
# something without the word "test", or it will be run as
# well, outside of the context of our image comparison test
# generator.
func = staticmethod(func)
func.__get__(1).__name__ = '_private'
new_class = type(
name,
(ImageComparisonTest,),
{'_func': func,
'_baseline_images': baseline_images,
'_extensions': extensions,
'_tol': tol,
'_freetype_version': freetype_version,
'_remove_text': remove_text})
return new_class
return compare_images_decorator
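# Hedged usage sketch (illustrative test, not part of this module): a typical test in
# matplotlib.tests would look roughly like
#
#   @image_comparison(baseline_images=['my_figure'], extensions=['png'],
#                     remove_text=True)
#   def test_my_figure():
#       fig = plt.figure()
#       ax = fig.add_subplot(111)
#       ax.plot([1, 2, 3], [1, 4, 9])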
def _image_directories(func):
"""
Compute the baseline and result image directories for testing *func*.
Create the result directory if it doesn't exist.
"""
module_name = func.__module__
if module_name == '__main__':
# FIXME: this won't work for nested packages in matplotlib.tests
warnings.warn('test module run as script. guessing baseline image locations')
script_name = sys.argv[0]
basedir = os.path.abspath(os.path.dirname(script_name))
subdir = os.path.splitext(os.path.split(script_name)[1])[0]
else:
mods = module_name.split('.')
mods.pop(0) # <- will be the name of the package being tested (in
# most cases "matplotlib")
assert mods.pop(0) == 'tests'
subdir = os.path.join(*mods)
import imp
def find_dotted_module(module_name, path=None):
"""A version of imp which can handle dots in the module name"""
res = None
for sub_mod in module_name.split('.'):
res = _, path, _ = imp.find_module(sub_mod, path)
path = [path]
return res
mod_file = find_dotted_module(func.__module__)[1]
basedir = os.path.dirname(mod_file)
baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
result_dir = os.path.abspath(os.path.join('result_images', subdir))
if not os.path.exists(result_dir):
os.makedirs(result_dir)
return baseline_dir, result_dir
| mit |
crcollins/chemtools-webapp | chemtools/dataparser.py | 1 | 3311 | from cStringIO import StringIO
import math
import numpy as np
from scipy.optimize import curve_fit
import matplotlib
matplotlib.use('Cairo')
import matplotlib.pyplot as plot
np.seterr(all="ignore")
from fileparser import Output, catch
def kuhn_exp(x, a, b):
return a * np.sqrt(1 - b * np.cos(math.pi / (x + 1)))
def predict_values(xvals, homovals, lumovals, gapvals):
x = np.array(xvals)
maxx = max(xvals)
if maxx > 1:
x = 1. / x
maxx = x.max()
homoy = np.array(homovals)
homo_fit = lambda x, a, b: kuhn_exp(x, a, b)
(homoa, homob), var_matrix = curve_fit(homo_fit, x, homoy, p0=[-8, -.8])
homo_func = lambda x: kuhn_exp(x, homoa, homob)
lumoy = np.array(lumovals)
lumo_fit = lambda x, a, b: kuhn_exp(x, a, b) + homo_func(x)
(lumoa, lumob), var_matrix = curve_fit(lumo_fit, x, lumoy, p0=[5, -.8])
lumo_func = lambda x: kuhn_exp(x, lumoa, lumob) + homo_func(x)
gapy = np.array(gapvals)
gap_fit = lambda x, a, b: kuhn_exp(x, a, b) + lumo_func(x)
(gapa, gapb), var_matrix = curve_fit(gap_fit, x, gapy, p0=[11, -.8])
gap_func = lambda x: kuhn_exp(x, gapa, gapb) + lumo_func(x)
homo_limit = homo_func(0)
lumo_limit = lumo_func(0)
gap_limit = gap_func(0)
results = {
"homo": (homo_limit, homoa, homob, homo_func),
"lumo": (lumo_limit, lumoa, lumob, lumo_func),
"gap": (gap_limit, gapa, gapb, gap_func),
}
return results
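# Hedged usage sketch: extrapolating to the infinite-chain limit (1/N -> 0).
# The oligomer values below are invented for illustration only.
#
#   n = [1, 2, 3, 4, 5, 6]
#   homo = [-6.20, -5.90, -5.76, -5.70, -5.66, -5.64]
#   lumo = [-1.10, -1.60, -1.86, -1.98, -2.05, -2.09]
#   gap = [5.10, 4.30, 3.90, 3.72, 3.61, 3.55]
#   results = predict_values(n, homo, lumo, gap)
#   homo_limit, a, b, homo_func = results["homo"]  # extrapolated HOMO at 1/N = 0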
class DataParser(Output):
def __init__(self, f):
super(DataParser, self).__init__()
self.plots = (StringIO(), StringIO())
self.parse_file(f)
def get_graphs(self):
return self.plots
def extract_data(self, f):
out = []
for line in f:
if not line.startswith("#") and line.strip():
out.append([float(x.strip())
for x in line.replace(' ', '').split(',') if x])
return out
@catch
def parse_file(self, f):
datax, datahomo, datalumo, datagap = self.extract_data(f)
x = np.array(datax)
homoy = np.array(datahomo)
lumoy = np.array(datalumo)
gapy = np.array(datagap)
results = predict_values(datax, homoy, lumoy, gapy)
for key in ["Homo", "Lumo", "Gap"]:
values = results[key.lower()]
self.write(key)
self.write("A: %f, B: %f" % (values[1], values[2]))
self.write("limit: %f" % values[0])
self.write('')
maxx = max(datax)
if maxx > 1:
x = 1. / x
maxx = x.max()
xvals = np.linspace(0, maxx, 20)
# Make HOMO/LUMO plot
plot.plot(x, homoy, 'ro')
homo_func = results["homo"][3]
plot.plot(xvals, homo_func(xvals), 'r')
plot.plot(x, lumoy, 'ro')
lumo_func = results["lumo"][3]
plot.plot(xvals, lumo_func(xvals), 'g')
plot.ylabel("Eg in eV")
plot.xlabel("1/N")
plot.savefig(self.plots[0], format="eps")
plot.clf()
# Make Gap plot
plot.plot(x, gapy, 'ro')
gap_func = results["gap"][3]
plot.plot(xvals, gap_func(xvals), 'r')
plot.ylabel("Eg in eV")
plot.xlabel("1/N")
plot.savefig(self.plots[1], format="eps")
plot.clf()
| mit |
michaelgat/Udacity_DL | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
plt.show()
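# Hedged usage sketch (TF 1.x API; the shape is illustrative):
#
#   hist_dist('Truncated normal, stddev=0.1',
#             tf.truncated_normal([1000], stddev=0.1))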
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
    assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
| mit |