repo_name (stringlengths 6-92) | path (stringlengths 4-191) | copies (stringclasses, 322 values) | size (stringlengths 4-6) | content (stringlengths 821-753k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
kmike/scikit-learn | sklearn/utils/__init__.py | 3 | 10094 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array, check_arrays, safe_asarray,
assert_all_finite, array2d, atleast2d_or_csc,
atleast2d_or_csr, warn_if_not_float,
check_random_state)
from .class_weight import compute_class_weight
__all__ = ["murmurhash3_32", "as_float_array", "check_arrays", "safe_asarray",
"assert_all_finite", "array2d", "atleast2d_or_csc",
"atleast2d_or_csr", "warn_if_not_float", "check_random_state",
"compute_class_weight"]
# Make sure that DeprecationWarning get printed
warnings.simplefilter("always", DeprecationWarning)
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated, and
add a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asanyarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:class:`sklearn.cross_validation.Bootstrap`
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
arrays = check_arrays(*arrays, sparse_format='csr')
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
resampled_arrays = []
for array in arrays:
array = array[indices]
resampled_arrays.append(array)
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
Returns
-------
X ** 2 : element wise square
"""
X = safe_asarray(X)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_even_slices(n, n_packs):
"""Generator to create n_packs slices going up to n.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
yield slice(start, end, None)
start = end
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(Warning):
"Custom warning to capture convergence problems"
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.20/_downloads/76822bb92a8465181ec2a7ee96ca8cf4/plot_decoding_csp_timefreq.py | 1 | 6457 | """
============================================================================
Decoding in time-frequency space data using the Common Spatial Pattern (CSP)
============================================================================
The time-frequency decomposition is estimated by iterating over raw data that
has been band-passed at different frequencies. This is used to compute a
covariance matrix over each epoch or a rolling time-window and extract the CSP
filtered signals. A linear discriminant classifier is then applied to these
signals.
"""
# Authors: Laura Gwilliams <laura.gwilliams@nyu.edu>
# Jean-Remi King <jeanremi.king@gmail.com>
# Alex Barachant <alexandre.barachant@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import Epochs, create_info, events_from_annotations
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
from mne.time_frequency import AverageTFR
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
###############################################################################
# Set parameters and read data
event_id = dict(hands=2, feet=3) # motor imagery: hands vs feet
subject = 1
runs = [6, 10, 14]
raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])
# Extract information from the raw file
sfreq = raw.info['sfreq']
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
raw.pick_types(meg=False, eeg=True, stim=False, eog=False, exclude='bads')
# Assemble the classifier using scikit-learn pipeline
clf = make_pipeline(CSP(n_components=4, reg=None, log=True, norm_trace=False),
LinearDiscriminantAnalysis())
n_splits = 5 # how many folds to use for cross-validation
cv = StratifiedKFold(n_splits=n_splits, shuffle=True)
# Classification & Time-frequency parameters
tmin, tmax = -.200, 2.000
n_cycles = 10. # how many complete cycles: used to define window size
min_freq = 5.
max_freq = 25.
n_freqs = 8 # how many frequency bins to use
# Assemble list of frequency range tuples
freqs = np.linspace(min_freq, max_freq, n_freqs) # assemble frequencies
freq_ranges = list(zip(freqs[:-1], freqs[1:])) # make freqs list of tuples
# Infer window spacing from the max freq and number of cycles to avoid gaps
window_spacing = (n_cycles / np.max(freqs) / 2.)
centered_w_times = np.arange(tmin, tmax, window_spacing)[1:]
n_windows = len(centered_w_times)
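# With the values above (n_cycles = 10, max frequency = 25 Hz) the spacing is
# 10 / 25 / 2 = 0.2 s, i.e. roughly ten window centres between tmin and tmax.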
# Instantiate label encoder
le = LabelEncoder()
###############################################################################
# Loop through frequencies, apply classifier and save scores
# init scores
freq_scores = np.zeros((n_freqs - 1,))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
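    # e.g. roughly 10 / 6.4 ~ 1.6 s for the lowest band (~5-7.9 Hz), shrinking to
    # roughly 0.4 s for the highest band (~22-25 Hz)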
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
skip_by_annotation='edge')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
X = epochs.get_data()
# Save mean scores over folds for each frequency and time window
freq_scores[freq] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
###############################################################################
# Plot frequency results
plt.bar(freqs[:-1], freq_scores, width=np.diff(freqs)[0],
align='edge', edgecolor='black')
plt.xticks(freqs)
plt.ylim([0, 1])
plt.axhline(len(epochs['feet']) / len(epochs), color='k', linestyle='--',
label='chance level')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel('Decoding Scores')
plt.title('Frequency Decoding Scores')
###############################################################################
# Loop through frequencies and time, apply classifier and save scores
# init scores
tf_scores = np.zeros((n_freqs - 1, n_windows))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
skip_by_annotation='edge')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
# Roll covariance, csp and lda over time
for t, w_time in enumerate(centered_w_times):
# Center the min and max of the window
w_tmin = w_time - w_size / 2.
w_tmax = w_time + w_size / 2.
# Crop data into time-window of interest
X = epochs.copy().crop(w_tmin, w_tmax).get_data()
# Save mean scores over folds for each frequency and time window
tf_scores[freq, t] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
###############################################################################
# Plot time-frequency results
# Set up time frequency object
av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :],
centered_w_times, freqs[1:], 1)
chance = np.mean(y) # set chance level to white in the plot
av_tfr.plot([0], vmin=chance, title="Time-Frequency Decoding Scores",
cmap=plt.cm.Reds)
| bsd-3-clause |
bijanfallah/OI_CCLM | src/RMSE_MAPS_INGO.py | 1 | 2007 | # Program to show the maps of RMSE averaged over time
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import os
from netCDF4 import Dataset as NetCDFFile
import numpy as np
from CCLM_OUTS import Plot_CCLM
# option == 1 -> shift 4 with default cclm domain and nboundlines = 3
# option == 2 -> shift 4 with smaller cclm domain and nboundlines = 3
# option == 3 -> shift 4 with smaller cclm domain and nboundlines = 6
# option == 4 -> shift 4 with corrected smaller cclm domain and nboundlines = 3
# option == 5 -> shift 4 with corrected smaller cclm domain and nboundlines = 4
# option == 6 -> shift 4 with corrected smaller cclm domain and nboundlines = 6
# option == 7 -> shift 4 with corrected smaller cclm domain and nboundlines = 9
# option == 8 -> shift 4 with corrected bigger cclm domain and nboundlines = 3
from CCLM_OUTS import Plot_CCLM
#def f(x):
# if x==-9999:
# return float('NaN')
# else:
# return x
def read_data_from_mistral(dir='/work/bb1029/b324045/work1/work/member/post/',name='member_T_2M_ts_seasmean.nc',var='T_2M'):
# type: (object, object, object) -> object
#a function to read the data from mistral work
"""
:rtype: object
"""
#CMD = 'scp $mistral:' + dir + name + ' ./'
CMD = 'wget users.met.fu-berlin.de/~BijanFallah/' + dir + name
os.system(CMD)
nc = NetCDFFile(name)
# for name2, variable in nc.variables.items():
# for attrname in variable.ncattrs():
# print(name2, variable, '-----------------',attrname)
# #print("{} -- {}".format(attrname, getattr(variable, attrname)))
os.remove(name)
lats = nc.variables['lat'][:]
lons = nc.variables['lon'][:]
t = nc.variables[var][:].squeeze()
rlats = nc.variables['rlat'][:] # extract/copy the data
rlons = nc.variables['rlon'][:]
#f2 = np.vectorize(f)
#t= f2(t)
#t=t.data
t=t.squeeze()
#print()
nc.close()
return(t, lats, lons, rlats, rlons)
| mit |
hsu/chrono | src/demos/trackVehicle/validationPlots_test_M113.py | 5 | 4229 | # -*- coding: utf-8 -*-
"""
Created on Wed May 06 11:00:53 2015
@author: newJustin
"""
import ChronoTrack_pandas as CT
import pylab as py
if __name__ == '__main__':
# logger
import logging as lg
    lg.basicConfig(filename = 'logFile.log', level=lg.WARN, format='%(message)s')
# default font size
import matplotlib
font = {'size' : 14}
matplotlib.rc('font', **font)
# **********************************************************************
# =============== USER INPUT =======================================
# data dir, end w/ '/'
# data_dir = 'D:/Chrono_github_Build/bin/outdata_M113/'
data_dir = 'E:/Chrono_github_Build/bin/outdata_M113/'
'''
# list of data files to plot
chassis = 'M113_chassis.csv'
gearSubsys = 'M113_Side0_gear.csv'
idlerSubsys = 'M113_Side0_idler.csv'
# ptrainSubsys = 'test_driveChain_ptrain.csv'
shoe0 = 'M113_Side0_shoe0.csv'
'''
chassis = 'M113_400_200__chassis.csv'
gearSubsys = 'M113_400_200__Side0_gear.csv'
idlerSubsys = 'M113_400_200__Side0_idler.csv'
# ptrainSubsys = 'test_driveChain_ptrain.csv'
shoe0 = 'M113_400_200__Side0_shoe0.csv'
data_files = [data_dir + chassis, data_dir + gearSubsys, data_dir + idlerSubsys, data_dir + shoe0]
handle_list = ['chassis','gear','idler','shoe0']
# handle_list = ['Gear','idler','ptrain','shoe0','gearCV','idlerCV','rollerCV','gearContact','shoeGearContact']
'''
gearCV = 'test_driveChain_GearCV.csv'
idlerCV = 'test_driveChain_idler0CV.csv'
rollerCV = 'test_driveChain_roller0CV.csv'
gearContact = 'test_driveChain_gearContact.csv'
shoeGearContact = 'test_driveChain_shoe0GearContact.csv'
'''
# data_files = [data_dir + gearSubsys, data_dir + idlerSubsys, data_dir + ptrainSubsys, data_dir + shoe0, data_dir + gearCV, data_dir + idlerCV, data_dir + rollerCV, data_dir + gearContact, data_dir+shoeGearContact]
# handle_list = ['Gear','idler','ptrain','shoe0','gearCV','idlerCV','rollerCV','gearContact','shoeGearContact']
# list of data files for gear/pin comparison plots
# Primitive gear geometry
'''
gear = 'driveChain_P_gear.csv'
gearContact = 'driveChain_P_gearContact.csv'
shoe = 'driveChain_P_shoe0.csv'
shoeContact = 'driveChain_P_shoe0GearContact.csv'
ptrain = 'driveChain_P_ptrain.csv'
# Collision Callback gear geometry
gear = 'driveChain_CC_gear.csv'
gearContact = 'driveChain_CC_gearContact.csv'
shoe = 'driveChain_CC_shoe0.csv'
shoeContact = 'driveChain_CC_shoe0GearContact.csv'
ptrain = 'driveChain_CC_ptrain.csv'
data_files = [data_dir+gear, data_dir+gearContact, data_dir+shoe, data_dir+shoeContact, data_dir+ptrain]
handle_list = ['Gear','gearContact','shoe0','shoeGearContact','ptrain']
'''
# construct the panda class for the DriveChain, file list and list of legend
M113_Chain0 = CT.ChronoTrack_pandas(data_files, handle_list)
# set the time limits. tmin = -1 will plot the entire time range
tmin = 1.0
tmax = 8.0
#0) plot the chassis
M113_Chain0.plot_chassis(tmin, tmax)
# 1) plot the gear body info
M113_Chain0.plot_gear(tmin, tmax)
# 2) plot idler body info, tensioner force
M113_Chain0.plot_idler(tmin,tmax)
'''
# 3) plot powertrain info
M113_Chain0.plot_ptrain()
'''
# 4) plot shoe 0 body info, and pin 0 force/torque
M113_Chain0.plot_shoe(tmin,tmax)
'''
# 5) plot gear Constraint Violations
M113_Chain0.plot_gearCV(tmin,tmax)
# 6) plot idler Constraint Violations
M113_Chain0.plot_idlerCV(tmin,tmax)
# 7) plot roller Constraint Violations
M113_Chain0.plot_rollerCV(tmin,tmax)
# 8) from the contact report callback function, gear contact info
M113_Chain0.plot_gearContactInfo(tmin,tmax)
# 9) from shoe-gear report callback function, contact info
M113_Chain0.plot_shoeGearContactInfo(tmin,tmax)
'''
# 10) track shoe trajectory: rel-X vs. rel-Y
M113_Chain0.plot_trajectory(tmin,tmax)
py.show() | bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/metrics/tests/test_score_objects.py | 15 | 17443 | import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
'neg_mean_squared_error', 'neg_median_absolute_error',
'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
def test_deprecated_names():
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
for name in ('mean_absolute_error', 'mean_squared_error',
'median_absolute_error', 'log_loss'):
warning_msg = "Scoring method %s was renamed to" % name
for scorer in (get_scorer(name), SCORERS[name]):
assert_warns_message(DeprecationWarning,
warning_msg,
scorer, clf, X, y)
assert_warns_message(DeprecationWarning,
warning_msg,
cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), f1_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), roc_auc_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
Ridge(), r2_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
KMeans(), adjusted_rand_score)
| mit |
wavelets/zipline | zipline/examples/dual_ema_talib.py | 2 | 3230 | #!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
# Import exponential moving average from talib wrapper
from zipline.transforms.ta import EMA
from datetime import datetime
import pytz
class DualEMATaLib(TradingAlgorithm):
"""Dual Moving Average Crossover algorithm.
This algorithm buys apple once its short moving average crosses
its long moving average (indicating upwards momentum) and sells
its shares once the averages cross again (indicating downwards
momentum).
"""
def initialize(self, short_window=20, long_window=40):
# Add 2 mavg transforms, one with a long window, one
# with a short window.
self.short_ema_trans = EMA(timeperiod=short_window)
self.long_ema_trans = EMA(timeperiod=long_window)
# To keep track of whether we invested in the stock or not
self.invested = False
def handle_data(self, data):
self.short_ema = self.short_ema_trans.handle_data(data)
self.long_ema = self.long_ema_trans.handle_data(data)
if self.short_ema is None or self.long_ema is None:
return
self.buy = False
self.sell = False
if (self.short_ema > self.long_ema).all() and not self.invested:
self.order('AAPL', 100)
self.invested = True
self.buy = True
elif (self.short_ema < self.long_ema).all() and self.invested:
self.order('AAPL', -100)
self.invested = False
self.sell = True
self.record(AAPL=data['AAPL'].price,
short_ema=self.short_ema['AAPL'],
long_ema=self.long_ema['AAPL'],
buy=self.buy,
sell=self.sell)
if __name__ == '__main__':
start = datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(1991, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
dma = DualEMATaLib()
results = dma.run(data).dropna()
fig = plt.figure()
ax1 = fig.add_subplot(211, ylabel='portfolio value')
results.portfolio_value.plot(ax=ax1)
ax2 = fig.add_subplot(212)
results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
'^', markersize=10, color='m')
ax2.plot(results.ix[results.sell].index, results.short_ema[results.sell],
'v', markersize=10, color='k')
plt.legend(loc=0)
plt.gcf().set_size_inches(18, 8)
| apache-2.0 |
ChanChiChoi/scikit-learn | examples/model_selection/plot_roc.py | 146 | 3697 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
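# Raveling flattens both the label-indicator matrix and the score matrix, so every
# (sample, class) entry is treated as a single binary decision (micro-averaging).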
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# Plot ROC curve
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]))
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
rcolasanti/CompaniesHouseScraper | DVLACompanyNmeMatchCoHoAPIFindMissing.py | 1 | 5174 |
import requests
import json
import numpy as np
import pandas as pd
import CoHouseToken
from difflib import SequenceMatcher
# In[3]:
def exactMatch(line1, line2):
line1=line1.upper().rstrip()
line2=line2.upper().rstrip()
#print("|"+line1+"|"+line2+"|",line1==line2)
return line1==line2
def aStopWord(word):
return word.upper().replace("COMPANY","CO").replace("LIMITED","LTD").replace("&","AND").rstrip()
def spaces(word):
w = word.upper().replace("/"," ")
w = w.replace("."," ").replace(","," ").replace("-"," ").rstrip()
return w
def removeAStopWord(word):
w = word.upper().replace("LTD"," ").replace("CO"," ").replace("AND"," ").replace("("," ").replace("/"," ")
w = w.replace(")"," ").replace("."," ").replace(","," ").replace("-"," ").rstrip()
return w
def removeABlank(word):
w = word.replace(" ","")
return w
def removeABracket (line):
flag = False
word=""
for a in line:
if a=="(":
flag = True
a=""
if a==")":
a=""
flag = False
if flag:
a=""
word+=a
return word
def stopWord(line1, line2):
line1=aStopWord(line1)
line2=aStopWord(line2)
#print("|"+line1+"|"+line2+"|",line1==line2)
return line1==line2
def removeStopWord(line1, line2):
line1=spaces(line1)
line2=spaces(line2)
line1=aStopWord(line1)
line2=aStopWord(line2)
line1=removeAStopWord(line1)
line2=removeAStopWord(line2)
#print("|"+line1+"|"+line2+"|",line1==line2)
return line1==line2
def removeBlanks(line1, line2):
line1=spaces(line1)
line2=spaces(line2)
line1=aStopWord(line1)
line2=aStopWord(line2)
line1=removeAStopWord(line1)
line2=removeAStopWord(line2)
line1=removeABlank(line1)
line2=removeABlank(line2)
return line1==line2
def removeBrackets(line1, line2):
line1=removeABracket(line1)
line2=removeABracket(line2)
line1=spaces(line1)
line2=spaces(line2)
line1=aStopWord(line1)
line2=aStopWord(line2)
line1=removeAStopWord(line1)
line2=removeAStopWord(line2)
line1=removeABlank(line1)
line2=removeABlank(line2)
#print("|"+line1+"|"+line2+"|",line1==line2)
return line1==line2
def strip(line1, line2):
line1=removeABracket(line1)
line2=removeABracket(line2)
line1=spaces(line1)
line2=spaces(line2)
line1=aStopWord(line1)
line2=aStopWord(line2)
line1=removeAStopWord(line1)
line2=removeAStopWord(line2)
line1=removeABlank(line1)
line2=removeABlank(line2)
return line1,line2
def match(company,results):
for i in results['items']:
line = i['title']
number = i['company_number']
if(exactMatch(company,line)):
return True,line,number
for i in results['items']:
line = i['title']
number = i['company_number']
if(stopWord(company,line)):
return True,line,number
for i in results['items']:
line = i['title']
number = i['company_number']
if(removeStopWord(company,line)):
return True,line,number
for i in results['items']:
line = i['title']
number = i['company_number']
if(removeBlanks(company,line)):
return True,line,number
for i in results['items']:
line = i['title']
number = i['company_number']
if(removeBrackets(company,line)):
return True,line,number
#old_match(company,results)
return False,"",""
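# Illustrative example of the cascade above (made-up company name): for
# "SMITH & SONS LIMITED", exactMatch only accepts an identical title, stopWord also
# accepts "SMITH AND SONS LTD", and the later stages additionally ignore
# punctuation, blanks and bracketed text before the search is given up.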
def main(args):
print(args[0])
search_url ="https://api.companieshouse.gov.uk/search/companies?q="
token = CoHouseToken.getToken()
pw = ''
base_url = 'https://api.companieshouse.gov.uk'
file = args[1]
print(file)
df = pd.read_csv(file,names=['Organisation'])
companies = df.Organisation
count=0
found = open("found2.csv",'w')
missing = open("missing2.csv",'w')
for c in companies:
c =c.upper().replace("&","AND")
c = c.split(" T/A ")[0]
c = c.split("WAS ")[0]
c= spaces(c)
url=search_url+c
results = json.loads(requests.get(url, auth=(token,pw)).text)
for i , key in enumerate(results['items']):
a,b = strip(c, key['title'])
r = SequenceMatcher(None, a, b).ratio()
print("%s \t %s\t %.2f \t %s \t %s"%(i,c,r,key['company_number'],key['title']))
v = input('type number or return to reject: ')
if v =="":
print("reject")
missing.write("%s\n"%(c))
else:
key = results['items'][int(v)]
print("%s \t %s\t %.2f \t %s \t %s"%(v,c,r,key['company_number'],key['title']))
print("*************************")
found.write("%s,%s,%s,\n"%(c,key['title'],key['company_number']))
print()
#print(count/len(companies))
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| gpl-3.0 |
mclaughlin6464/pasta | pasta/ising.py | 1 | 5474 | '''
This is a dummy file for me to get started making an Ising model. I'll get this 2-D Ising running, then generalize.
'''
import argparse
from itertools import izip
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
def run_ising(N, d, K, J,h, n_steps, plot = False):
'''
    :param N:
        Length of one side of the cubic lattice.
    :param d:
        Number of dimensions of the lattice.
    :param K:
        Bond coupling strength.
    :param J:
        Energy scale of the bonds; the acceptance test uses the ratio K / J.
    :param h:
        External magnetic field strength.
    :param n_steps:
        Number of Monte Carlo steps to simulate.
    :param plot:
        Whether to plot the lattice while the simulation runs (only for d <= 2).
    :return:
        Array of average spin correlations at distances 1 .. N/2.
'''
if plot:
try:
assert d <= 2
except AssertionError:
raise AssertionError("Can only plot in one or two dimensions.")
#TODO wrap these better
assert N >0 and N < 1000
assert d > 0
assert n_steps > 0
np.random.seed(0)
size = tuple(N for i in xrange(d))
lattice = np.ones(size)
#make a random initial state
lattice-= np.random.randint(0,2, size =size)*2
# do different initialization
E_0 = energy(lattice, potential, K, h)
if plot:
plt.ion()
for step in xrange(n_steps):
if step%1000 == 0:
print step
site = tuple(np.random.randint(0, N, size=d))
# consider flipping this site
lattice[site] *= -1
E_f = energy(lattice, potential, K, h)
# if E_F < E_0, keep
# if E_F > E_0, keep randomly given change of energies
if E_f >= E_0:
keep = np.random.uniform() < np.exp(K / J * (E_0 - E_f))
else:
keep = True
if keep:
E_0 = E_f
else:
lattice[site] *= -1
# fig = plt.figure()
if plot and step % 100 == 0:
if d == 1:
plt.imshow(lattice.reshape((1, -1)),interpolation='none')
else:
plt.imshow(lattice, interpolation='none')
plt.title(correlation(lattice, N/2))
plt.pause(0.01)
plt.clf()
return np.array([correlation(lattice, r) for r in xrange(1, N/2+1)])
def get_NN(site, N, d, r= 1):
'''
The NN of the site. Will only return those UP in index (east, south, and down) to avoid double counting.
Accounts for PBC
:param site:
(d,) array of coordinates in the lattice
:param N:
Size of one side of the lattice
:param d:
dimension of the lattice
:return:
dxd numpy array where each row corresponds to the nearest neighbors.
'''
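    # Example: on a 4x4 lattice (N=4, d=2), get_NN((0, 0), 4, 2) returns the two
    # higher-index neighbors [[1, 0], [0, 1]], while get_NN((3, 3), 4, 2) wraps
    # around to [[0, 3], [3, 0]] because of the periodic boundary conditions.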
mult_sites = np.r_[ [site for i in xrange(d)]]
adjustment = np.eye(d)*r
return ((mult_sites+adjustment)%N).astype(int)
def potential(s1, s2, K, h):
'''
Basic Ising potential
:param s1:
First spin (-1 or 1)
:param s2:
Second spin
:param K:
        Coupling constant
    :param h:
        External magnetic field strength
    :return:
Energy of this particular bond
'''
return -1*K*s1*s2 - h/2*(s1+s2)#should this be abstracted to call the NN function?
def energy(lattice, potential, K, h = 0):
'''
Calculate the energy of a lattice
:param lattice:
Lattice to calculate the energy on
:param potential:
Function defining the potential of a given site.
:return:
Energy of the lattice
'''
N = lattice.shape[0]
d = len(lattice.shape)
dim_slices = np.meshgrid(*(xrange(N) for i in xrange(d)), indexing = 'ij')
all_sites = izip(*[slice.flatten() for slice in dim_slices])
E = 0
for site in all_sites:
nn = get_NN(site, N, d)
for neighbor in nn:
E+=potential(lattice[site], lattice[tuple(neighbor)],K = K, h = h)
return E
def magnetization(lattice):
return lattice.mean()
def correlation(lattice, r):
'''
The average spin correlation at distance r.
:param lattice:
The lattice to calculate the statistic on.
:param r:
Distance to measure correlation
:return:
'''
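    # Each of the N**d sites contributes d neighbor pairs at distance r, hence the
    # normalization by (N**d) * d in the value returned below.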
N = lattice.shape[0]
d = len(lattice.shape)
dim_slices = np.meshgrid(*(xrange(N) for i in xrange(d)), indexing='ij')
all_sites = izip(*[slice.flatten() for slice in dim_slices])
xi = 0
for site in all_sites:
nn = get_NN(site, N, d, r)
for neighbor in nn:
xi += lattice[site]*lattice[tuple(neighbor)]
return xi/((N**d)*d)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Simulate an ising model')
parser.add_argument('N', type = int, help = 'Length of one side of the cube.')
parser.add_argument('d', type = int, help = 'Number of dimensions of the cube.')
#parser.add_argument('K', type = float, help ='Bond coupling strength.')
parser.add_argument('J', type = float, default = 1.0, nargs = '?',\
help = 'Energy of bond strength. Optional, default is 1.')
parser.add_argument('h', type = float, default=0.0, nargs = '?',\
help = 'Magnetic field strength. Optional, default is 0.')
parser.add_argument('n_steps', type = int, default = 1000, nargs = '?',\
help = 'Number of steps to simulate. Default is 1e5')
parser.add_argument('--plot', action = 'store_true',\
help = 'Whether or not to plot results. Only allowed with d = 1 or 2.')
args = parser.parse_args()
spins = []
Ks = [ 0.5,0.6,0.65, 0.7,0.8, 0.9]
for K in Ks:
print K
spins.append(run_ising(K = K, **vars(args)))
for K, spin in izip(Ks, spins):
plt.plot(spin, label = K )
plt.legend(loc = 'best')
plt.ylim([-0.1, 1.1])
plt.show() | mit |
nicholaschris/landsatpy | stuff.py | 1 | 1864 | import cloud_detection_new as cloud_detection
from matplotlib import pyplot as plt
import views
from skimage import exposure
nir = cloud_detection.get_nir()[0:600,2000:2600]
red = cloud_detection.get_red()[0:600,2000:2600]
green = cloud_detection.get_green()[0:600,2000:2600]
blue = cloud_detection.get_blue()[0:600,2000:2600] # or use coastal
coastal = cloud_detection.get_coastal()[0:600,2000:2600]
marine_shadow_index = (green-blue)/(green+blue)
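# The ratio above is a normalized-difference style index built from the green and
# blue bands; with non-negative band values it is bounded between -1 and 1.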
img = views.create_composite(red, green, blue)
img_rescale = exposure.rescale_intensity(img, in_range=(0, 90))
plt.rcParams['savefig.facecolor'] = "0.8"
vmin, vmax=0.0,0.1
def example_plot(ax, data, fontsize=12):
ax.imshow(data, vmin=vmin, vmax=vmax)
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
plt.close('all')
fig = plt.figure()
ax1=plt.subplot(243)
ax2=plt.subplot(244)
ax3=plt.subplot(247)
ax4=plt.subplot(248)
ax5=plt.subplot(121)
a_coastal = coastal[500:600, 500:600]
a_blue = blue[500:600, 500:600]
a_green = green[500:600, 500:600]
a_red = red[500:600, 500:600]
a_nir = nir[500:600, 500:600]
a_img = img[500:600, 500:600]
spec1 = [a_coastal[60, 60], a_blue[60, 60], a_green[60, 60], a_red[60, 60], a_nir[60, 60]]
b_coastal = coastal[200:300, 100:200]
b_blue = blue[200:300, 100:200]
b_green = green[200:300, 100:200]
b_red = red[200:300, 100:200]
b_nir = nir[200:300, 100:200]
b_img = img[200:300, 100:200]
example_plot(ax1, coastal)
example_plot(ax2, blue)
example_plot(ax3, green)
example_plot(ax4, red)
ax5.imshow(img)
# plt.tight_layout()
plt.close('all')
spec = [b_coastal[60, 60], b_blue[60, 60], b_green[60, 60], b_red[60, 60], b_nir[60, 60]]
plt.plot(spec, 'k*-')
plt.plot(spec1, 'k.-')
plt.close('all')
cbg = (coastal+blue+green)/3
plt.imshow(cbg/red) | mit |
Monika319/EWEF-1 | Cw2Rezonans/Karolina/Oscyloskop/OscyloskopZ5W2.py | 1 | 1312 | # -*- coding: utf-8 -*-
"""
Plot oscilloscope files from MultiSim
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from matplotlib import rc
rc('font',family="Consolas")
files=["real_zad5_05f_p2.txt"]
for NazwaPliku in files:
print NazwaPliku
Plik=open(NazwaPliku)
#print DeltaT
Dane=Plik.readlines()#[4:]
DeltaT=float(Dane[2].split()[3].replace(",","."))
#M=len(Dane[4].split())/2
M=2
Dane=Dane[5:]
Plik.close()
print M
Ys=[np.zeros(len(Dane)) for i in range(M)]
for m in range(M):
for i in range(len(Dane)):
try:
Ys[m][i]=float(Dane[i].split()[2+3*m].replace(",","."))
except:
print m, i, 2+3*m, len(Dane[i].split()), Dane[i].split()
#print i, Y[i]
X=np.zeros_like(Ys[0])
for i in range(len(X)):
X[i]=i*DeltaT
for y in Ys:
print max(y)-min(y)
    Opis=u"Układ szeregowy\nPołowa częstotliwości rezonansowej"  # "Series circuit / Half of the resonant frequency"
    Nazwa=u"Z5W2"
    plt.title(u"Przebieg napięciowy\n"+Opis)  # "Voltage waveform"
    plt.xlabel(u"Czas t [s]")  # "Time t [s]"
    plt.ylabel(u"Napięcie [V]")  # "Voltage [V]"
    plt.plot(X,Ys[0],label=u"Wejście")  # label: "Input"
    plt.plot(X,Ys[1],label=u"Wyjście")  # label: "Output"
plt.grid()
plt.legend(loc="best")
plt.savefig(Nazwa + ".png", bbox_inches='tight')
plt.show()
| gpl-2.0 |
nddsg/TreeDecomps | xplodnTree/tdec/b2CliqueTreeRules.py | 1 | 3569 | #!/usr/bin/env python
__author__ = 'saguinag' + '@' + 'nd.edu'
__version__ = "0.1.0"
##
## fname "b2CliqueTreeRules.py"
##
## TODO: some todo list
## VersionLog:
import net_metrics as metrics
import pandas as pd
import argparse, traceback
import os, sys
import networkx as nx
import re
from collections import deque, defaultdict, Counter
import tree_decomposition as td
import PHRG as phrg
import probabilistic_cfg as pcfg
import exact_phrg as xphrg
import a1_hrg_cliq_tree as nfld
from a1_hrg_cliq_tree import load_edgelist
DEBUG = False
def get_parser ():
parser = argparse.ArgumentParser(description='b2CliqueTreeRules.py: given a tree derive grammar rules')
parser.add_argument('-t', '--treedecomp', required=True, help='input tree decomposition (dimacs file format)')
parser.add_argument('--version', action='version', version=__version__)
return parser
def dimacs_td_ct (tdfname):
""" tree decomp to clique-tree """
print '... input file:', tdfname
fname = tdfname
graph_name = os.path.basename(fname)
gname = graph_name.split('.')[0]
gfname = "datasets/out." + gname
tdh = os.path.basename(fname).split('.')[1] # tree decomp heuristic
tfname = gname+"."+tdh
G = load_edgelist(gfname)
if DEBUG: print nx.info(G)
print
with open(fname, 'r') as f: # read tree decomp from inddgo
lines = f.readlines()
lines = [x.rstrip('\r\n') for x in lines]
cbags = {}
bags = [x.split() for x in lines if x.startswith('B')]
for b in bags:
cbags[int(b[1])] = [int(x) for x in b[3:]] # what to do with bag size?
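        # assumed line format: e.g. "B 1 3 4 7 9" describes bag 1 of size 3 with
        # vertices {4, 7, 9}; only the bag id and the vertex list are used here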
edges = [x.split()[1:] for x in lines if x.startswith('e')]
edges = [[int(k) for k in x] for x in edges]
tree = defaultdict(set)
for s, t in edges:
tree[frozenset(cbags[s])].add(frozenset(cbags[t]))
if DEBUG: print '.. # of keys in `tree`:', len(tree.keys())
if DEBUG: print tree.keys()
root = list(tree)[0]
if DEBUG: print '.. Root:', root
root = frozenset(cbags[1])
if DEBUG: print '.. Root:', root
T = td.make_rooted(tree, root)
if DEBUG: print '.. T rooted:', len(T)
# nfld.unfold_2wide_tuple(T) # lets me display the tree's frozen sets
T = phrg.binarize(T)
prod_rules = {}
td.new_visit(T, G, prod_rules)
if DEBUG: print "--------------------"
if DEBUG: print "- Production Rules -"
if DEBUG: print "--------------------"
for k in prod_rules.iterkeys():
if DEBUG: print k
s = 0
for d in prod_rules[k]:
s += prod_rules[k][d]
for d in prod_rules[k]:
    prod_rules[k][d] = float(prod_rules[k][d]) / float(s) # normalization step to create probs not counts.
if DEBUG: print '\t -> ', d, prod_rules[k][d]
rules = []
id = 0
for k, v in prod_rules.iteritems():
sid = 0
for x in prod_rules[k]:
rhs = re.findall("[^()]+", x)
rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
if DEBUG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
sid += 1
id += 1
df = pd.DataFrame(rules)
outdf_fname = "./ProdRules/"+tfname+".prules"
if not os.path.isfile(outdf_fname+".bz2"):
print '...',outdf_fname, "written"
df.to_csv(outdf_fname+".bz2", compression="bz2")
else:
print '...', outdf_fname, "file exists"
return
def main ():
parser = get_parser()
args = vars(parser.parse_args())
dimacs_td_ct(args['treedecomp']) # derive production rules from the tree decomposition
if __name__ == '__main__':
try:
main()
except Exception, e:
print str(e)
traceback.print_exc()
sys.exit(1)
sys.exit(0)
| mit |
apache/spark | python/pyspark/sql/functions.py | 14 | 161861 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collections of builtin functions
"""
import sys
import functools
import warnings
from pyspark import since, SparkContext
from pyspark.rdd import PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf # noqa: F401
from pyspark.sql.udf import _create_udf
# Keep pandas_udf and PandasUDFType import for backwards compatible import; moved in SPARK-28264
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType # noqa: F401
from pyspark.sql.utils import to_str
# Note to developers: all of PySpark functions here take string as column names whenever possible.
# Namely, if columns are referred as arguments, they can be always both Column or string,
# even though there might be few exceptions for legacy or inevitable reasons.
# If you are fixing other language APIs together, also please note that Scala side is not the case
# since it requires to make every single overridden definition.
def _get_get_jvm_function(name, sc):
"""
Retrieves JVM function identified by name from
Java gateway associated with sc.
"""
return getattr(sc._jvm.functions, name)
def _invoke_function(name, *args):
"""
Invokes JVM function identified by name with args
and wraps the result with :class:`~pyspark.sql.Column`.
"""
jf = _get_get_jvm_function(name, SparkContext._active_spark_context)
return Column(jf(*args))
def _invoke_function_over_column(name, col):
"""
Invokes unary JVM function identified by name
and wraps the result with :class:`~pyspark.sql.Column`.
"""
return _invoke_function(name, _to_java_column(col))
def _invoke_binary_math_function(name, col1, col2):
"""
Invokes binary JVM math function identified by name
and wraps the result with :class:`~pyspark.sql.Column`.
"""
return _invoke_function(
name,
# For legacy reasons, the arguments here can be implicitly converted into floats,
# if they are not columns or strings.
_to_java_column(col1) if isinstance(col1, (str, Column)) else float(col1),
_to_java_column(col2) if isinstance(col2, (str, Column)) else float(col2)
)
def _options_to_str(options=None):
if options:
return {key: to_str(value) for (key, value) in options.items()}
return {}
def lit(col):
"""
Creates a :class:`~pyspark.sql.Column` of literal value.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
[Row(height=5, spark_user=True)]
"""
return col if isinstance(col, Column) else _invoke_function("lit", col)
@since(1.3)
def col(col):
"""
Returns a :class:`~pyspark.sql.Column` based on the given column name.
Examples
--------
>>> col('x')
Column<'x'>
>>> column('x')
Column<'x'>
"""
return _invoke_function("col", col)
column = col
@since(1.3)
def asc(col):
"""
Returns a sort expression based on the ascending order of the given column name.
"""
return (
col.asc() if isinstance(col, Column)
else _invoke_function("asc", col)
)
@since(1.3)
def desc(col):
"""
Returns a sort expression based on the descending order of the given column name.
"""
return (
col.desc() if isinstance(col, Column)
else _invoke_function("desc", col)
)
@since(1.3)
def sqrt(col):
"""
Computes the square root of the specified float value.
"""
return _invoke_function_over_column("sqrt", col)
@since(1.3)
def abs(col):
"""
Computes the absolute value.
"""
return _invoke_function_over_column("abs", col)
@since(1.3)
def max(col):
"""
Aggregate function: returns the maximum value of the expression in a group.
"""
return _invoke_function_over_column("max", col)
@since(1.3)
def min(col):
"""
Aggregate function: returns the minimum value of the expression in a group.
"""
return _invoke_function_over_column("min", col)
@since(1.3)
def count(col):
"""
Aggregate function: returns the number of items in a group.
"""
return _invoke_function_over_column("count", col)
@since(1.3)
def sum(col):
"""
Aggregate function: returns the sum of all values in the expression.
"""
return _invoke_function_over_column("sum", col)
@since(1.3)
def avg(col):
"""
Aggregate function: returns the average of the values in a group.
"""
return _invoke_function_over_column("avg", col)
@since(1.3)
def mean(col):
"""
Aggregate function: returns the average of the values in a group.
"""
return _invoke_function_over_column("mean", col)
@since(1.3)
def sumDistinct(col):
"""
Aggregate function: returns the sum of distinct values in the expression.
.. deprecated:: 3.2.0
Use :func:`sum_distinct` instead.
"""
warnings.warn("Deprecated in 3.2, use sum_distinct instead.", FutureWarning)
return sum_distinct(col)
@since(3.2)
def sum_distinct(col):
"""
Aggregate function: returns the sum of distinct values in the expression.
"""
return _invoke_function_over_column("sum_distinct", col)
def product(col):
"""
Aggregate function: returns the product of the values in a group.
.. versionadded:: 3.2.0
Parameters
----------
col : str, :class:`Column`
column containing values to be multiplied together
Examples
--------
>>> df = spark.range(1, 10).toDF('x').withColumn('mod3', col('x') % 3)
>>> prods = df.groupBy('mod3').agg(product('x').alias('product'))
>>> prods.orderBy('mod3').show()
+----+-------+
|mod3|product|
+----+-------+
| 0| 162.0|
| 1| 28.0|
| 2| 80.0|
+----+-------+
"""
return _invoke_function_over_column("product", col)
def acos(col):
"""
.. versionadded:: 1.4.0
Returns
-------
:class:`~pyspark.sql.Column`
inverse cosine of `col`, as if computed by `java.lang.Math.acos()`
"""
return _invoke_function_over_column("acos", col)
def acosh(col):
"""
Computes inverse hyperbolic cosine of the input column.
.. versionadded:: 3.1.0
Returns
-------
:class:`~pyspark.sql.Column`
"""
return _invoke_function_over_column("acosh", col)
def asin(col):
"""
.. versionadded:: 1.3.0
Returns
-------
:class:`~pyspark.sql.Column`
inverse sine of `col`, as if computed by `java.lang.Math.asin()`
"""
return _invoke_function_over_column("asin", col)
def asinh(col):
"""
Computes inverse hyperbolic sine of the input column.
.. versionadded:: 3.1.0
Returns
-------
:class:`~pyspark.sql.Column`
"""
return _invoke_function_over_column("asinh", col)
def atan(col):
"""
.. versionadded:: 1.4.0
Returns
-------
:class:`~pyspark.sql.Column`
inverse tangent of `col`, as if computed by `java.lang.Math.atan()`
"""
return _invoke_function_over_column("atan", col)
def atanh(col):
"""
Computes inverse hyperbolic tangent of the input column.
.. versionadded:: 3.1.0
Returns
-------
:class:`~pyspark.sql.Column`
"""
return _invoke_function_over_column("atanh", col)
@since(1.4)
def cbrt(col):
"""
Computes the cube-root of the given value.
"""
return _invoke_function_over_column("cbrt", col)
@since(1.4)
def ceil(col):
"""
Computes the ceiling of the given value.
"""
return _invoke_function_over_column("ceil", col)
def cos(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
angle in radians
Returns
-------
:class:`~pyspark.sql.Column`
cosine of the angle, as if computed by `java.lang.Math.cos()`.
"""
return _invoke_function_over_column("cos", col)
def cosh(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
hyperbolic angle
Returns
-------
:class:`~pyspark.sql.Column`
hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`
"""
return _invoke_function_over_column("cosh", col)
@since(1.4)
def exp(col):
"""
Computes the exponential of the given value.
"""
return _invoke_function_over_column("exp", col)
@since(1.4)
def expm1(col):
"""
Computes the exponential of the given value minus one.
"""
return _invoke_function_over_column("expm1", col)
@since(1.4)
def floor(col):
"""
Computes the floor of the given value.
"""
return _invoke_function_over_column("floor", col)
@since(1.4)
def log(col):
"""
Computes the natural logarithm of the given value.
"""
return _invoke_function_over_column("log", col)
@since(1.4)
def log10(col):
"""
Computes the logarithm of the given value in Base 10.
"""
return _invoke_function_over_column("log10", col)
@since(1.4)
def log1p(col):
"""
Computes the natural logarithm of the given value plus one.
"""
return _invoke_function_over_column("log1p", col)
@since(1.4)
def rint(col):
"""
Returns the double value that is closest in value to the argument and
is equal to a mathematical integer.
"""
return _invoke_function_over_column("rint", col)
@since(1.4)
def signum(col):
"""
Computes the signum of the given value.
"""
return _invoke_function_over_column("signum", col)
def sin(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
Returns
-------
:class:`~pyspark.sql.Column`
sine of the angle, as if computed by `java.lang.Math.sin()`
"""
return _invoke_function_over_column("sin", col)
def sinh(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
hyperbolic angle
Returns
-------
:class:`~pyspark.sql.Column`
hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`
"""
return _invoke_function_over_column("sinh", col)
def tan(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
angle in radians
Returns
-------
:class:`~pyspark.sql.Column`
tangent of the given value, as if computed by `java.lang.Math.tan()`
"""
return _invoke_function_over_column("tan", col)
def tanh(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
hyperbolic angle
Returns
-------
:class:`~pyspark.sql.Column`
hyperbolic tangent of the given value
as if computed by `java.lang.Math.tanh()`
"""
return _invoke_function_over_column("tanh", col)
@since(1.4)
def toDegrees(col):
"""
.. deprecated:: 2.1.0
Use :func:`degrees` instead.
"""
warnings.warn("Deprecated in 2.1, use degrees instead.", FutureWarning)
return degrees(col)
@since(1.4)
def toRadians(col):
"""
.. deprecated:: 2.1.0
Use :func:`radians` instead.
"""
warnings.warn("Deprecated in 2.1, use radians instead.", FutureWarning)
return radians(col)
@since(1.4)
def bitwiseNOT(col):
"""
Computes bitwise not.
.. deprecated:: 3.2.0
Use :func:`bitwise_not` instead.
"""
warnings.warn("Deprecated in 3.2, use bitwise_not instead.", FutureWarning)
return bitwise_not(col)
@since(3.2)
def bitwise_not(col):
"""
Computes bitwise not.
"""
return _invoke_function_over_column("bitwise_not", col)
@since(2.4)
def asc_nulls_first(col):
"""
Returns a sort expression based on the ascending order of the given
column name, and null values appear before non-null values.
"""
return (
col.asc_nulls_first() if isinstance(col, Column)
else _invoke_function("asc_nulls_first", col)
)
@since(2.4)
def asc_nulls_last(col):
"""
Returns a sort expression based on the ascending order of the given
column name, and null values appear after non-null values.
"""
return (
col.asc_nulls_last() if isinstance(col, Column)
else _invoke_function("asc_nulls_last", col)
)
@since(2.4)
def desc_nulls_first(col):
"""
Returns a sort expression based on the descending order of the given
column name, and null values appear before non-null values.
"""
return (
col.desc_nulls_first() if isinstance(col, Column)
else _invoke_function("desc_nulls_first", col)
)
@since(2.4)
def desc_nulls_last(col):
"""
Returns a sort expression based on the descending order of the given
column name, and null values appear after non-null values.
"""
return (
col.desc_nulls_last() if isinstance(col, Column)
else _invoke_function("desc_nulls_last", col)
)
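# Hedged illustration of the four null-ordering helpers above; this is not part of the
# upstream docstrings. It assumes an active SparkSession named `spark`, as the doctests
# elsewhere in this module do.
# >>> df = spark.createDataFrame([(1, "Bob"), (2, None)], ["age", "name"])
# >>> df.orderBy(asc_nulls_first("name")).collect()
# [Row(age=2, name=None), Row(age=1, name='Bob')]
# >>> df.orderBy(asc_nulls_last("name")).collect()
# [Row(age=1, name='Bob'), Row(age=2, name=None)]
# The desc_nulls_first / desc_nulls_last variants behave the same way for descending order.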
@since(1.6)
def stddev(col):
"""
Aggregate function: alias for stddev_samp.
"""
return _invoke_function_over_column("stddev", col)
@since(1.6)
def stddev_samp(col):
"""
Aggregate function: returns the unbiased sample standard deviation of
the expression in a group.
"""
return _invoke_function_over_column("stddev_samp", col)
@since(1.6)
def stddev_pop(col):
"""
Aggregate function: returns population standard deviation of
the expression in a group.
"""
return _invoke_function_over_column("stddev_pop", col)
@since(1.6)
def variance(col):
"""
Aggregate function: alias for var_samp
"""
return _invoke_function_over_column("variance", col)
@since(1.6)
def var_samp(col):
"""
Aggregate function: returns the unbiased sample variance of
the values in a group.
"""
return _invoke_function_over_column("var_samp", col)
@since(1.6)
def var_pop(col):
"""
Aggregate function: returns the population variance of the values in a group.
"""
return _invoke_function_over_column("var_pop", col)
@since(1.6)
def skewness(col):
"""
Aggregate function: returns the skewness of the values in a group.
"""
return _invoke_function_over_column("skewness", col)
@since(1.6)
def kurtosis(col):
"""
Aggregate function: returns the kurtosis of the values in a group.
"""
return _invoke_function_over_column("kurtosis", col)
def collect_list(col):
"""
Aggregate function: returns a list of objects with duplicates.
.. versionadded:: 1.6.0
Notes
-----
The function is non-deterministic because the order of collected results depends
on the order of the rows which may be non-deterministic after a shuffle.
Examples
--------
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_list('age')).collect()
[Row(collect_list(age)=[2, 5, 5])]
"""
return _invoke_function_over_column("collect_list", col)
def collect_set(col):
"""
Aggregate function: returns a set of objects with duplicate elements eliminated.
.. versionadded:: 1.6.0
Notes
-----
The function is non-deterministic because the order of collected results depends
on the order of the rows which may be non-deterministic after a shuffle.
Examples
--------
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
return _invoke_function_over_column("collect_set", col)
def degrees(col):
"""
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
angle in radians
Returns
-------
:class:`~pyspark.sql.Column`
angle in degrees, as if computed by `java.lang.Math.toDegrees()`
"""
return _invoke_function_over_column("degrees", col)
def radians(col):
"""
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
angle in degrees
Returns
-------
:class:`~pyspark.sql.Column`
angle in radians, as if computed by `java.lang.Math.toRadians()`
"""
return _invoke_function_over_column("radians", col)
def atan2(col1, col2):
"""
.. versionadded:: 1.4.0
Parameters
----------
col1 : str, :class:`~pyspark.sql.Column` or float
coordinate on y-axis
col2 : str, :class:`~pyspark.sql.Column` or float
coordinate on x-axis
Returns
-------
:class:`~pyspark.sql.Column`
the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
"""
return _invoke_binary_math_function("atan2", col1, col2)
@since(1.4)
def hypot(col1, col2):
"""
Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.
"""
return _invoke_binary_math_function("hypot", col1, col2)
@since(1.4)
def pow(col1, col2):
"""
Returns the value of the first argument raised to the power of the second argument.
"""
return _invoke_binary_math_function("pow", col1, col2)
@since(1.6)
def row_number():
"""
Window function: returns a sequential number starting at 1 within a window partition.
"""
return _invoke_function("row_number")
@since(1.6)
def dense_rank():
"""
Window function: returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would give you sequential numbers, so
the person who came in third place (after the ties) would register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.
"""
return _invoke_function("dense_rank")
@since(1.6)
def rank():
"""
Window function: returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would give you sequential numbers, so
the person who came in third place (after the ties) would register as coming in fifth.
This is equivalent to the RANK function in SQL.
"""
return _invoke_function("rank")
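# Hedged illustration of the rank/dense_rank contrast described above; not part of the
# upstream docstrings. Assumes an active SparkSession named `spark`.
# >>> from pyspark.sql import Window
# >>> w = Window.orderBy("value")
# >>> df = spark.createDataFrame([(1,), (1,), (2,)], ["value"])
# >>> df.select("value", rank().over(w).alias("rank"),
# ...           dense_rank().over(w).alias("dense_rank")).orderBy("value").collect()
# [Row(value=1, rank=1, dense_rank=1), Row(value=1, rank=1, dense_rank=1),
#  Row(value=2, rank=3, dense_rank=2)]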
@since(1.6)
def cume_dist():
"""
Window function: returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.
"""
return _invoke_function("cume_dist")
@since(1.6)
def percent_rank():
"""
Window function: returns the relative rank (i.e. percentile) of rows within a window partition.
"""
return _invoke_function("percent_rank")
@since(1.3)
def approxCountDistinct(col, rsd=None):
"""
.. deprecated:: 2.1.0
Use :func:`approx_count_distinct` instead.
"""
warnings.warn("Deprecated in 2.1, use approx_count_distinct instead.", FutureWarning)
return approx_count_distinct(col, rsd)
def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`~pyspark.sql.Column` for approximate distinct count
of column `col`.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
rsd : float, optional
maximum relative standard deviation allowed (default = 0.05).
For rsd < 0.01, it is more efficient to use :func:`count_distinct`
Examples
--------
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
"""
sc = SparkContext._active_spark_context
if rsd is None:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col))
else:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd)
return Column(jc)
@since(1.6)
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins."""
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
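# Hedged illustration of a broadcast hint in a join; not part of the upstream docstring.
# Assumes an active SparkSession named `spark` and the module's standard doctest `df`
# with rows (age=2, name='Alice') and (age=5, name='Bob').
# >>> small = spark.createDataFrame([(2, "x")], ["age", "flag"])
# >>> df.join(broadcast(small), "age").select("age", "name", "flag").collect()
# [Row(age=2, name='Alice', flag='x')]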
def coalesce(*cols):
"""Returns the first column that is not null.
.. versionadded:: 1.4.0
Examples
--------
>>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
>>> cDf.show()
+----+----+
| a| b|
+----+----+
|null|null|
| 1|null|
|null| 2|
+----+----+
>>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
+--------------+
|coalesce(a, b)|
+--------------+
| null|
| 1|
| 2|
+--------------+
>>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
+----+----+----------------+
| a| b|coalesce(a, 0.0)|
+----+----+----------------+
|null|null| 0.0|
| 1|null| 1.0|
|null| 2| 0.0|
+----+----+----------------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def corr(col1, col2):
"""Returns a new :class:`~pyspark.sql.Column` for the Pearson Correlation Coefficient for
``col1`` and ``col2``.
.. versionadded:: 1.6.0
Examples
--------
>>> a = range(20)
>>> b = [2 * x for x in range(20)]
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(corr("a", "b").alias('c')).collect()
[Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
def covar_pop(col1, col2):
"""Returns a new :class:`~pyspark.sql.Column` for the population covariance of ``col1`` and
``col2``.
.. versionadded:: 2.0.0
Examples
--------
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_pop("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
def covar_samp(col1, col2):
"""Returns a new :class:`~pyspark.sql.Column` for the sample covariance of ``col1`` and
``col2``.
.. versionadded:: 2.0.0
Examples
--------
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_samp("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
def countDistinct(col, *cols):
"""Returns a new :class:`~pyspark.sql.Column` for distinct count of ``col`` or ``cols``.
An alias of :func:`count_distinct`, and it is encouraged to use :func:`count_distinct`
directly.
.. versionadded:: 1.3.0
"""
return count_distinct(col, *cols)
def count_distinct(col, *cols):
"""Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.
.. versionadded:: 3.2.0
Examples
--------
>>> df.agg(count_distinct(df.age, df.name).alias('c')).collect()
[Row(c=2)]
>>> df.agg(count_distinct("age", "name").alias('c')).collect()
[Row(c=2)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.count_distinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
return Column(jc)
def first(col, ignorenulls=False):
"""Aggregate function: returns the first value in a group.
The function by default returns the first value it sees. It will return the first non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. versionadded:: 1.3.0
Notes
-----
The function is non-deterministic because its result depends on the order of the
rows, which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls)
return Column(jc)
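# Hedged illustration of the ignorenulls flag; not part of the upstream docstring, and the
# row order inside a group is not guaranteed, so only the ignorenulls result is shown.
# Assumes an active SparkSession named `spark`.
# >>> df2 = spark.createDataFrame([("Alice", None), ("Alice", 5)], ["name", "age"])
# >>> df2.groupBy("name").agg(first("age", ignorenulls=True).alias("age")).collect()
# [Row(name='Alice', age=5)]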
def grouping(col):
"""
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
or not, returns 1 for aggregated or 0 for not aggregated in the result set.
.. versionadded:: 2.0.0
Examples
--------
>>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
+-----+--------------+--------+
| name|grouping(name)|sum(age)|
+-----+--------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+--------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping(_to_java_column(col))
return Column(jc)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equal to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
.. versionadded:: 2.0.0
Notes
-----
The list of columns should match with grouping columns exactly, or empty (means all
the grouping columns).
Examples
--------
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def input_file_name():
"""Creates a string column for the file name of the current Spark task.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.input_file_name())
def isnan(col):
"""An expression that returns true iff the column is NaN.
.. versionadded:: 1.6.0
Examples
--------
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnan(_to_java_column(col)))
def isnull(col):
"""An expression that returns true iff the column is null.
.. versionadded:: 1.6.0
Examples
--------
>>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
>>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
def last(col, ignorenulls=False):
"""Aggregate function: returns the last value in a group.
The function by default returns the last value it sees. It will return the last non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. versionadded:: 1.3.0
Notes
-----
The function is non-deterministic because its result depends on the order of the
rows, which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
return Column(jc)
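# Hedged illustration, analogous to the first() sketch above; not part of the upstream
# docstring. Assumes an active SparkSession named `spark`.
# >>> df2 = spark.createDataFrame([("Alice", 5), ("Alice", None)], ["name", "age"])
# >>> df2.groupBy("name").agg(last("age", ignorenulls=True).alias("age")).collect()
# [Row(name='Alice', age=5)]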
def monotonically_increasing_id():
"""A column that generates monotonically increasing 64-bit integers.
The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
The current implementation puts the partition ID in the upper 31 bits, and the record number
within each partition in the lower 33 bits. The assumption is that the data frame has
less than 1 billion partitions, and each partition has less than 8 billion records.
.. versionadded:: 1.6.0
Notes
-----
The function is non-deterministic because its result depends on partition IDs.
As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
This expression would return the following IDs:
0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
>>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
>>> df0.select(monotonically_increasing_id().alias('id')).collect()
[Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.monotonically_increasing_id())
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
.. versionadded:: 1.6.0
Examples
--------
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
def percentile_approx(col, percentage, accuracy=10000):
"""Returns the approximate `percentile` of the numeric column `col` which is the smallest value
in the ordered `col` values (sorted from least to greatest) such that no more than `percentage`
of `col` values is less than the value or equal to that value.
The value of percentage must be between 0.0 and 1.0.
The accuracy parameter (default: 10000)
is a positive numeric literal which controls approximation accuracy at the cost of memory.
Higher value of accuracy yields better accuracy, 1.0/accuracy is the relative error
of the approximation.
When percentage is an array, each value of the percentage array must be between 0.0 and 1.0.
In this case, returns the approximate percentile array of column col
at the given percentage array.
.. versionadded:: 3.1.0
Examples
--------
>>> key = (col("id") % 3).alias("key")
>>> value = (randn(42) + key * 10).alias("value")
>>> df = spark.range(0, 1000, 1, 1).select(key, value)
>>> df.select(
... percentile_approx("value", [0.25, 0.5, 0.75], 1000000).alias("quantiles")
... ).printSchema()
root
|-- quantiles: array (nullable = true)
| |-- element: double (containsNull = false)
>>> df.groupBy("key").agg(
... percentile_approx("value", 0.5, lit(1000000)).alias("median")
... ).printSchema()
root
|-- key: long (nullable = true)
|-- median: double (nullable = true)
"""
sc = SparkContext._active_spark_context
if isinstance(percentage, (list, tuple)):
# A local list
percentage = sc._jvm.functions.array(_to_seq(sc, [
_create_column_from_literal(x) for x in percentage
]))
elif isinstance(percentage, Column):
# Already a Column
percentage = _to_java_column(percentage)
else:
# Probably scalar
percentage = _create_column_from_literal(percentage)
accuracy = (
_to_java_column(accuracy) if isinstance(accuracy, Column)
else _create_column_from_literal(accuracy)
)
return Column(sc._jvm.functions.percentile_approx(_to_java_column(col), percentage, accuracy))
def rand(seed=None):
"""Generates a random column with independent and identically distributed (i.i.d.) samples
uniformly distributed in [0.0, 1.0).
.. versionadded:: 1.4.0
Notes
-----
The function is non-deterministic in general case.
Examples
--------
>>> df.withColumn('rand', rand(seed=42) * 3).collect()
[Row(age=2, name='Alice', rand=2.4052597283576684),
Row(age=5, name='Bob', rand=2.3913904055683974)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.rand(seed)
else:
jc = sc._jvm.functions.rand()
return Column(jc)
def randn(seed=None):
"""Generates a column with independent and identically distributed (i.i.d.) samples from
the standard normal distribution.
.. versionadded:: 1.4.0
Notes
-----
The function is non-deterministic in general case.
Examples
--------
>>> df.withColumn('randn', randn(seed=42)).collect()
[Row(age=2, name='Alice', randn=1.1027054481455365),
Row(age=5, name='Bob', randn=0.7400395449950132)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.randn(seed)
else:
jc = sc._jvm.functions.randn()
return Column(jc)
def round(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0,
or round it at the integral part when `scale` < 0.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
[Row(r=3.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.round(_to_java_column(col), scale))
def bround(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0,
or round it at the integral part when `scale` < 0.
.. versionadded:: 2.0.0
Examples
--------
>>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
[Row(r=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.bround(_to_java_column(col), scale))
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
.. versionadded:: 1.5.0
.. deprecated:: 3.2.0
Use :func:`shiftleft` instead.
"""
warnings.warn("Deprecated in 3.2, use shiftleft instead.", FutureWarning)
return shiftleft(col, numBits)
def shiftleft(col, numBits):
"""Shift the given value numBits left.
.. versionadded:: 3.2.0
Examples
--------
>>> spark.createDataFrame([(21,)], ['a']).select(shiftleft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftleft(_to_java_column(col), numBits))
def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
.. versionadded:: 1.5.0
.. deprecated:: 3.2.0
Use :func:`shiftright` instead.
"""
warnings.warn("Deprecated in 3.2, use shiftright instead.", FutureWarning)
return shiftright(col, numBits)
def shiftright(col, numBits):
"""(Signed) shift the given value numBits right.
.. versionadded:: 3.2.0
Examples
--------
>>> spark.createDataFrame([(42,)], ['a']).select(shiftright('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
return Column(jc)
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
.. versionadded:: 1.5.0
.. deprecated:: 3.2.0
Use :func:`shiftrightunsigned` instead.
"""
warnings.warn("Deprecated in 3.2, use shiftrightunsigned instead.", FutureWarning)
return shiftrightunsigned(col, numBits)
def shiftrightunsigned(col, numBits):
"""Unsigned shift the given value numBits right.
.. versionadded:: 3.2.0
Examples
--------
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftrightunsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc)
def spark_partition_id():
"""A column for partition ID.
.. versionadded:: 1.6.0
Notes
-----
This is non deterministic because it depends on data partitioning and task scheduling.
Examples
--------
>>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
[Row(pid=0), Row(pid=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.spark_partition_id())
def expr(str):
"""Parses the expression string into the column that it represents
.. versionadded:: 1.5.0
Examples
--------
>>> df.select(expr("length(name)")).collect()
[Row(length(name)=5), Row(length(name)=3)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.expr(str))
def struct(*cols):
"""Creates a new struct column.
.. versionadded:: 1.4.0
Parameters
----------
cols : list, set, str or :class:`~pyspark.sql.Column`
column names or :class:`~pyspark.sql.Column`\\s to contain in the output struct.
Examples
--------
>>> df.select(struct('age', 'name').alias("struct")).collect()
[Row(struct=Row(age=2, name='Alice')), Row(struct=Row(age=5, name='Bob'))]
>>> df.select(struct([df.age, df.name]).alias("struct")).collect()
[Row(struct=Row(age=2, name='Alice')), Row(struct=Row(age=5, name='Bob'))]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def greatest(*cols):
"""
Returns the greatest value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
[Row(greatest=4)]
"""
if len(cols) < 2:
raise ValueError("greatest should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column)))
def least(*cols):
"""
Returns the least value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
[Row(least=1)]
"""
if len(cols) < 2:
raise ValueError("least should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column)))
def when(condition, value):
"""Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`pyspark.sql.Column.otherwise` is not invoked, None is returned for unmatched
conditions.
.. versionadded:: 1.4.0
Parameters
----------
condition : :class:`~pyspark.sql.Column`
a boolean :class:`~pyspark.sql.Column` expression.
value :
a literal value, or a :class:`~pyspark.sql.Column` expression.
Examples
--------
>>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
[Row(age=3), Row(age=4)]
>>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
[Row(age=3), Row(age=None)]
"""
sc = SparkContext._active_spark_context
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = sc._jvm.functions.when(condition._jc, v)
return Column(jc)
def log(arg1, arg2=None):
"""Returns the first argument-based logarithm of the second argument.
If there is only one argument, then this takes the natural logarithm of the argument.
.. versionadded:: 1.5.0
Examples
--------
>>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
['0.30102', '0.69897']
>>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
['0.69314', '1.60943']
"""
sc = SparkContext._active_spark_context
if arg2 is None:
jc = sc._jvm.functions.log(_to_java_column(arg1))
else:
jc = sc._jvm.functions.log(arg1, _to_java_column(arg2))
return Column(jc)
def log2(col):
"""Returns the base-2 logarithm of the argument.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
[Row(log2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.log2(_to_java_column(col)))
def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex='15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
def factorial(col):
"""
Computes the factorial of the given value.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([(5,)], ['n'])
>>> df.select(factorial(df.n).alias('f')).collect()
[Row(f=120)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.factorial(_to_java_column(col)))
# --------------- Window functions ------------------------
def lag(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows before the current row, and
`default` if there are fewer than `offset` rows before the current row. For example,
an `offset` of one will return the previous row at any given point in the window partition.
This is equivalent to the LAG function in SQL.
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
offset : int, optional
number of rows to look back from the current row
default : optional
default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lag(_to_java_column(col), offset, default))
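# Hedged illustration of lag with an explicit default; not part of the upstream docstring.
# Assumes an active SparkSession named `spark`.
# >>> from pyspark.sql import Window
# >>> df = spark.createDataFrame([("a", 1), ("a", 2), ("a", 3)], ["grp", "v"])
# >>> w = Window.partitionBy("grp").orderBy("v")
# >>> df.select("v", lag("v", 1, 0).over(w).alias("prev")).orderBy("v").collect()
# [Row(v=1, prev=0), Row(v=2, prev=1), Row(v=3, prev=2)]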
def lead(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows after the current row, and
`default` if there are fewer than `offset` rows after the current row. For example,
an `offset` of one will return the next row at any given point in the window partition.
This is equivalent to the LEAD function in SQL.
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
offset : int, optional
number of rows to look ahead from the current row
default : optional
default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lead(_to_java_column(col), offset, default))
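# Hedged illustration of lead, mirroring the lag sketch above; not part of the upstream
# docstring. Assumes an active SparkSession named `spark`.
# >>> from pyspark.sql import Window
# >>> df = spark.createDataFrame([("a", 1), ("a", 2), ("a", 3)], ["grp", "v"])
# >>> w = Window.partitionBy("grp").orderBy("v")
# >>> df.select("v", lead("v", 1).over(w).alias("next")).orderBy("v").collect()
# [Row(v=1, next=2), Row(v=2, next=3), Row(v=3, next=None)]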
def nth_value(col, offset, ignoreNulls=False):
"""
Window function: returns the value that is the `offset`\\th row of the window frame
(counting from 1), and `null` if the size of the window frame is less than `offset` rows.
It will return the `offset`\\th non-null value it sees when `ignoreNulls` is set to
true. If all values are null, then null is returned.
This is equivalent to the nth_value function in SQL.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
offset : int
the 1-based position of the row within the window frame to take the value from
ignoreNulls : bool, optional
indicates the Nth value should skip null in the
determination of which row to use
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nth_value(_to_java_column(col), offset, ignoreNulls))
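# Hedged illustration: with the default window frame (unbounded preceding to current row),
# nth_value stays null until the frame reaches `offset` rows. Not part of the upstream
# docstring; assumes an active SparkSession named `spark`.
# >>> from pyspark.sql import Window
# >>> df = spark.createDataFrame([("a", 1), ("a", 2), ("a", 3)], ["grp", "v"])
# >>> w = Window.partitionBy("grp").orderBy("v")
# >>> df.select("v", nth_value("v", 2).over(w).alias("second")).orderBy("v").collect()
# [Row(v=1, second=None), Row(v=2, second=2), Row(v=3, second=2)]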
def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is equivalent to the NTILE function in SQL.
.. versionadded:: 1.4.0
Parameters
----------
n : int
an integer
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.ntile(int(n)))
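# Hedged illustration of ntile(2) over four ordered rows; not part of the upstream
# docstring. Assumes an active SparkSession named `spark`.
# >>> from pyspark.sql import Window
# >>> df = spark.createDataFrame([(i,) for i in range(1, 5)], ["v"])
# >>> df.select("v", ntile(2).over(Window.orderBy("v")).alias("half")).orderBy("v").collect()
# [Row(v=1, half=1), Row(v=2, half=1), Row(v=3, half=2), Row(v=4, half=2)]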
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
"""
Returns the current date at the start of query evaluation as a :class:`DateType` column.
All calls of current_date within the same query return the same value.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_date())
def current_timestamp():
"""
Returns the current timestamp at the start of query evaluation as a :class:`TimestampType`
column. All calls of current_timestamp within the same query return the same value.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_timestamp())
def date_format(date, format):
"""
Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be, for instance, `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of `datetime pattern`_ can be used.
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
.. versionadded:: 1.5.0
Notes
-----
Whenever possible, use specialized functions like `year`.
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date='04/08/2015')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_format(_to_java_column(date), format))
def year(col):
"""
Extract the year of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(year('dt').alias('year')).collect()
[Row(year=2015)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.year(_to_java_column(col)))
def quarter(col):
"""
Extract the quarter of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(quarter('dt').alias('quarter')).collect()
[Row(quarter=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.quarter(_to_java_column(col)))
def month(col):
"""
Extract the month of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(month('dt').alias('month')).collect()
[Row(month=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.month(_to_java_column(col)))
def dayofweek(col):
"""
Extract the day of the week of a given date as integer.
.. versionadded:: 2.3.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofweek('dt').alias('day')).collect()
[Row(day=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofweek(_to_java_column(col)))
def dayofmonth(col):
"""
Extract the day of the month of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofmonth('dt').alias('day')).collect()
[Row(day=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofmonth(_to_java_column(col)))
def dayofyear(col):
"""
Extract the day of the year of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofyear('dt').alias('day')).collect()
[Row(day=98)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofyear(_to_java_column(col)))
def hour(col):
"""
Extract the hours of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(hour('ts').alias('hour')).collect()
[Row(hour=13)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hour(_to_java_column(col)))
def minute(col):
"""
Extract the minutes of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(minute('ts').alias('minute')).collect()
[Row(minute=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.minute(_to_java_column(col)))
def second(col):
"""
Extract the seconds of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(second('ts').alias('second')).collect()
[Row(second=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.second(_to_java_column(col)))
def weekofyear(col):
"""
Extract the week number of a given date as integer.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(weekofyear(df.dt).alias('week')).collect()
[Row(week=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.weekofyear(_to_java_column(col)))
def date_add(start, days):
"""
Returns the date that is `days` days after `start`
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
def date_sub(start, days):
"""
Returns the date that is `days` days before `start`
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
[Row(prev_date=datetime.date(2015, 4, 7))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_sub(_to_java_column(start), days))
def datediff(end, start):
"""
Returns the number of days from `start` to `end`.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
>>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
[Row(diff=32)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
def add_months(start, months):
"""
Returns the date that is `months` months after `start`
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
[Row(next_month=datetime.date(2015, 5, 8))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
def months_between(date1, date2, roundOff=True):
"""
Returns number of months between dates date1 and date2.
If date1 is later than date2, then the result is positive.
If date1 and date2 are on the same day of month, or both are the last day of month,
returns an integer (time of day will be ignored).
The result is rounded off to 8 digits unless `roundOff` is set to `False`.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
>>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
[Row(months=3.94959677)]
>>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
[Row(months=3.9495967741935485)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months_between(
_to_java_column(date1), _to_java_column(date2), roundOff))
def to_date(col, format=None):
"""Converts a :class:`~pyspark.sql.Column` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to `datetime pattern`_.
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
is omitted. Equivalent to ``col.cast("date")``.
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
.. versionadded:: 2.2.0
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_date(_to_java_column(col))
else:
jc = sc._jvm.functions.to_date(_to_java_column(col), format)
return Column(jc)
def to_timestamp(col, format=None):
"""Converts a :class:`~pyspark.sql.Column` into :class:`pyspark.sql.types.TimestampType`
using the optionally specified format. Specify formats according to `datetime pattern`_.
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
is omitted. Equivalent to ``col.cast("timestamp")``.
.. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html
.. versionadded:: 2.2.0
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
else:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
return Column(jc)
def trunc(date, format):
"""
Returns date truncated to the unit specified by the format.
.. versionadded:: 1.5.0
Parameters
----------
date : :class:`~pyspark.sql.Column` or str
format : str
'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
>>> df.select(trunc(df.d, 'year').alias('year')).collect()
[Row(year=datetime.date(1997, 1, 1))]
>>> df.select(trunc(df.d, 'mon').alias('month')).collect()
[Row(month=datetime.date(1997, 2, 1))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.trunc(_to_java_column(date), format))
def date_trunc(format, timestamp):
"""
Returns timestamp truncated to the unit specified by the format.
.. versionadded:: 2.3.0
Parameters
----------
format : str
'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
timestamp : :class:`~pyspark.sql.Column` or str
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
>>> df.select(date_trunc('year', df.t).alias('year')).collect()
[Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
>>> df.select(date_trunc('mon', df.t).alias('month')).collect()
[Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
def next_day(date, dayOfWeek):
"""
Returns the first date which is later than the value of the date column.
Day of the week parameter is case insensitive, and accepts:
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
>>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
[Row(date=datetime.date(2015, 8, 2))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
def last_day(date):
"""
Returns the last day of the month which the given date belongs to.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
>>> df.select(last_day(df.d).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.last_day(_to_java_column(date)))
def from_unixtime(timestamp, format="yyyy-MM-dd HH:mm:ss"):
"""
Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
representing the timestamp of that moment in the current system time zone in the given
format.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
>>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
[Row(ts='2015-04-08 00:00:00')]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format))
def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'):
"""
Convert a time string with the given pattern ('yyyy-MM-dd HH:mm:ss', by default)
to a Unix time stamp (in seconds), using the default timezone and the default
locale; return null if it fails.
If `timestamp` is None, then it returns the current timestamp.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
[Row(unix_time=1428476400)]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
if timestamp is None:
return Column(sc._jvm.functions.unix_timestamp())
return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format))
def from_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
However, a timestamp in Spark represents a number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the UTC
time zone to the given time zone.
This function may return a confusing result if the input is a string with a time zone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the time zone in the string, and finally displays the result by converting the
timestamp to a string according to the session local time zone.
.. versionadded:: 1.5.0
Parameters
----------
timestamp : :class:`~pyspark.sql.Column` or str
the column that contains timestamps
tz : :class:`~pyspark.sql.Column` or str
A string detailing the time zone ID that the input should be adjusted to. It should
be in the format of either region-based zone IDs or zone offsets. Region IDs must
have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in
the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are
supported as aliases of '+00:00'. Other short names are not recommended to use
because they can be ambiguous.
.. versionchanged:: 2.4
`tz` can take a :class:`~pyspark.sql.Column` containing timezone ID strings.
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
def to_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, interprets it as a timestamp in the given
time zone, and renders that timestamp as a timestamp in UTC.
However, a timestamp in Spark represents the number of microseconds from the Unix epoch, which
is not timezone-agnostic. So in Spark this function just shifts the timestamp value from the
given time zone to UTC.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session-local timezone.
.. versionadded:: 1.5.0
Parameters
----------
timestamp : :class:`~pyspark.sql.Column` or str
the column that contains timestamps
tz : :class:`~pyspark.sql.Column` or str
A string detailing the time zone ID that the input should be adjusted to. It should
be in the format of either region-based zone IDs or zone offsets. Region IDs must
have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in
the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are
supported as aliases of '+00:00'. Using other short names is not recommended
because they can be ambiguous.
.. versionchanged:: 2.4.0
`tz` can take a :class:`~pyspark.sql.Column` containing timezone ID strings.
Examples
--------
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
>>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz))
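# Editorial sketch (not part of the public API): `to_utc_timestamp` undoes
# `from_utc_timestamp` for the same time zone, since the two simply shift the value in
# opposite directions. Assumes an active SparkSession passed in as `spark`.
def _sketch_utc_shift_round_trip(spark):
    df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['ts'])
    # Shift from UTC to PST and back again; the round trip returns the original value.
    return df.select(
        to_utc_timestamp(from_utc_timestamp(df.ts, 'PST'), 'PST').alias('round_tripped'))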
def timestamp_seconds(col):
"""
.. versionadded:: 3.1.0
Examples
--------
>>> from pyspark.sql.functions import timestamp_seconds
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1230219000,)], ['unix_time'])
>>> time_df.select(timestamp_seconds(time_df.unix_time).alias('ts')).show()
+-------------------+
| ts|
+-------------------+
|2008-12-25 07:30:00|
+-------------------+
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.timestamp_seconds(_to_java_column(col)))
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
"""Bucketize rows into one or more time windows given a timestamp specifying column. Window
starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
[12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
the order of months are not supported.
The time column must be of :class:`pyspark.sql.types.TimestampType`.
Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
If the ``slideDuration`` is not provided, the windows will be tumbling windows.
The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
The output column will be a struct called 'window' by default with the nested columns 'start'
and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
.. versionadded:: 2.0.0
Examples
--------
>>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
>>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
>>> w.select(w.window.start.cast("string").alias("start"),
... w.window.end.cast("string").alias("end"), "sum").collect()
[Row(start='2016-03-11 09:00:05', end='2016-03-11 09:00:10', sum=1)]
"""
def check_string_field(field, fieldName):
if not field or type(field) is not str:
raise TypeError("%s should be provided as a string" % fieldName)
sc = SparkContext._active_spark_context
time_col = _to_java_column(timeColumn)
check_string_field(windowDuration, "windowDuration")
if slideDuration and startTime:
check_string_field(slideDuration, "slideDuration")
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime)
elif slideDuration:
check_string_field(slideDuration, "slideDuration")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration)
elif startTime:
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime)
else:
res = sc._jvm.functions.window(time_col, windowDuration)
return Column(res)
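# Editorial sketch (not part of the public API): the doctest above shows a tumbling window
# only; this sketch also shows a sliding window and a tumbling window with a `startTime`
# offset, as described in the docstring. Assumes an active SparkSession passed in as `spark`.
def _sketch_window_variants(spark):
    df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
    # 10-second windows that slide every 5 seconds, so a row can fall into two windows.
    sliding = df.groupBy(window("date", "10 seconds", "5 seconds")).agg(sum("val"))
    # Hourly tumbling windows that start 15 minutes past the hour, e.g. 09:15-10:15.
    offset = df.groupBy(window("date", "1 hour", startTime="15 minutes")).agg(sum("val"))
    return sliding, offset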
# ---------------------------- misc functions ----------------------------------
def crc32(col):
"""
Calculates the cyclic redundancy check value (CRC32) of a binary column and
returns the value as a bigint.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
[Row(crc32=2743272264)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.crc32(_to_java_column(col)))
def md5(col):
"""Calculates the MD5 digest and returns the value as a 32 character hex string.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
[Row(hash='902fbdd2b1df0c4f70b4a5d23525e932')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.md5(_to_java_column(col))
return Column(jc)
def sha1(col):
"""Returns the hex string result of SHA-1.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
[Row(hash='3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha1(_to_java_column(col))
return Column(jc)
def sha2(col, numBits):
"""Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
and SHA-512). The numBits indicates the desired bit length of the result, which must have a
value of 224, 256, 384, 512, or 0 (which is equivalent to 256).
.. versionadded:: 1.5.0
Examples
--------
>>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
>>> digests[0]
Row(s='3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
>>> digests[1]
Row(s='cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
return Column(jc)
def hash(*cols):
"""Calculates the hash code of given columns, and returns the result as an int column.
.. versionadded:: 2.0.0
Examples
--------
>>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
[Row(hash=-757602832)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def xxhash64(*cols):
"""Calculates the hash code of given columns using the 64-bit variant of the xxHash algorithm,
and returns the result as a long column.
.. versionadded:: 3.0.0
Examples
--------
>>> spark.createDataFrame([('ABC',)], ['a']).select(xxhash64('a').alias('hash')).collect()
[Row(hash=4105715581806190027)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.xxhash64(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def assert_true(col, errMsg=None):
"""
Returns null if the input column is true; throws an exception with the provided error message
otherwise.
.. versionadded:: 3.1.0
Examples
--------
>>> df = spark.createDataFrame([(0,1)], ['a', 'b'])
>>> df.select(assert_true(df.a < df.b).alias('r')).collect()
[Row(r=None)]
>>> df = spark.createDataFrame([(0,1)], ['a', 'b'])
>>> df.select(assert_true(df.a < df.b, df.a).alias('r')).collect()
[Row(r=None)]
>>> df = spark.createDataFrame([(0,1)], ['a', 'b'])
>>> df.select(assert_true(df.a < df.b, 'error').alias('r')).collect()
[Row(r=None)]
"""
sc = SparkContext._active_spark_context
if errMsg is None:
return Column(sc._jvm.functions.assert_true(_to_java_column(col)))
if not isinstance(errMsg, (str, Column)):
raise TypeError(
"errMsg should be a Column or a str, got {}".format(type(errMsg))
)
errMsg = (
_create_column_from_literal(errMsg)
if isinstance(errMsg, str)
else _to_java_column(errMsg)
)
return Column(sc._jvm.functions.assert_true(_to_java_column(col), errMsg))
@since(3.1)
def raise_error(errMsg):
"""
Throws an exception with the provided error message.
"""
if not isinstance(errMsg, (str, Column)):
raise TypeError(
"errMsg should be a Column or a str, got {}".format(type(errMsg))
)
sc = SparkContext._active_spark_context
errMsg = (
_create_column_from_literal(errMsg)
if isinstance(errMsg, str)
else _to_java_column(errMsg)
)
return Column(sc._jvm.functions.raise_error(errMsg))
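# Editorial sketch (not part of the public API): a typical use of `raise_error` is to fail
# a query with a descriptive message when a data-quality condition is violated, keeping the
# row value otherwise. Assumes an active SparkSession passed in as `spark`; `when` is the
# conditional function from this module.
def _sketch_raise_error_guard(spark):
    df = spark.createDataFrame([(0, 1)], ['a', 'b'])
    return df.select(
        when(df.a < df.b, df.a)
        .otherwise(raise_error('a must be smaller than b'))
        .alias('checked_a'))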
# ---------------------- String/Binary functions ------------------------------
@since(1.5)
def upper(col):
"""
Converts a string expression to upper case.
"""
return _invoke_function_over_column("upper", col)
@since(1.5)
def lower(col):
"""
Converts a string expression to lower case.
"""
return _invoke_function_over_column("lower", col)
@since(1.5)
def ascii(col):
"""
Computes the numeric value of the first character of the string column.
"""
return _invoke_function_over_column("ascii", col)
@since(1.5)
def base64(col):
"""
Computes the BASE64 encoding of a binary column and returns it as a string column.
"""
return _invoke_function_over_column("base64", col)
@since(1.5)
def unbase64(col):
"""
Decodes a BASE64 encoded string column and returns it as a binary column.
"""
return _invoke_function_over_column("unbase64", col)
@since(1.5)
def ltrim(col):
"""
Trim the spaces from the left end of the specified string value.
"""
return _invoke_function_over_column("ltrim", col)
@since(1.5)
def rtrim(col):
"""
Trim the spaces from the right end of the specified string value.
"""
return _invoke_function_over_column("rtrim", col)
@since(1.5)
def trim(col):
"""
Trim the spaces from both ends of the specified string column.
"""
return _invoke_function_over_column("trim", col)
def concat_ws(sep, *cols):
"""
Concatenates multiple input string columns together into a single string column,
using the given separator.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
[Row(s='abcd-123')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def decode(col, charset):
"""
Converts the first argument from a binary to a string using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
"""
Converts the first argument from a string to a binary using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.encode(_to_java_column(col), charset))
def format_number(col, d):
"""
Formats the number X to a format like '#,###,###.##', rounded to d decimal places
with HALF_EVEN round mode, and returns the result as a string.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
the column name of the numeric value to be formatted
d : int
the number of decimal places to round to
Examples
--------
>>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
[Row(v='5.0000')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
def format_string(format, *cols):
"""
Formats the arguments in printf-style and returns the result as a string column.
.. versionadded:: 1.5.0
Parameters
----------
format : str
string that can contain embedded format tags and used as result column's value
cols : :class:`~pyspark.sql.Column` or str
column names or :class:`~pyspark.sql.Column`\\s to be used in formatting
Examples
--------
>>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
>>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
[Row(v='5 hello')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
def instr(str, substr):
"""
Locate the position of the first occurrence of substr column in the given string.
Returns null if either of the arguments are null.
.. versionadded:: 1.5.0
Notes
-----
The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
Examples
--------
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
def overlay(src, replace, pos, len=-1):
"""
Overlay the specified portion of `src` with `replace`,
starting from byte position `pos` of `src` and proceeding for `len` bytes.
.. versionadded:: 3.0.0
Examples
--------
>>> df = spark.createDataFrame([("SPARK_SQL", "CORE")], ("x", "y"))
>>> df.select(overlay("x", "y", 7).alias("overlayed")).show()
+----------+
| overlayed|
+----------+
|SPARK_CORE|
+----------+
"""
if not isinstance(pos, (int, str, Column)):
raise TypeError(
"pos should be an integer or a Column / column name, got {}".format(type(pos)))
if len is not None and not isinstance(len, (int, str, Column)):
raise TypeError(
"len should be an integer or a Column / column name, got {}".format(type(len)))
pos = _create_column_from_literal(pos) if isinstance(pos, int) else _to_java_column(pos)
len = _create_column_from_literal(len) if isinstance(len, int) else _to_java_column(len)
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.overlay(
_to_java_column(src),
_to_java_column(replace),
pos,
len
))
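# Editorial sketch (not part of the public API): the fourth argument fixes how many
# characters of `src`, starting at `pos`, are overwritten, instead of relying on the
# default `len=-1`. Assumes an active SparkSession passed in as `spark`.
def _sketch_overlay_with_len(spark):
    df = spark.createDataFrame([("SPARK_SQL", "CORE")], ("x", "y"))
    # Overwrite exactly 3 characters of `x` starting at position 7 with the value of `y`.
    return df.select(overlay("x", "y", 7, 3).alias("overlayed"))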
def sentences(string, language=None, country=None):
"""
Splits a string into arrays of sentences, where each sentence is an array of words.
The 'language' and 'country' arguments are optional, and if omitted, the default locale is used.
.. versionadded:: 3.2.0
Parameters
----------
string : :class:`~pyspark.sql.Column` or str
a string to be split
language : :class:`~pyspark.sql.Column` or str, optional
a language of the locale
country : :class:`~pyspark.sql.Column` or str, optional
a country of the locale
Examples
--------
>>> df = spark.createDataFrame([["This is an example sentence."]], ["string"])
>>> df.select(sentences(df.string, lit("en"), lit("US"))).show(truncate=False)
+-----------------------------------+
|sentences(string, en, US) |
+-----------------------------------+
|[[This, is, an, example, sentence]]|
+-----------------------------------+
"""
if language is None:
language = lit("")
if country is None:
country = lit("")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.sentences(
_to_java_column(string),
_to_java_column(language),
_to_java_column(country)
))
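# Editorial sketch (not part of the public API): `language` and `country` are optional;
# omitting them makes the splitter fall back to the default locale, as the docstring notes.
# Assumes an active SparkSession passed in as `spark`.
def _sketch_sentences_default_locale(spark):
    df = spark.createDataFrame([["This is an example sentence."]], ["string"])
    return df.select(sentences(df.string).alias("sent"))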
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. versionadded:: 1.5.0
Notes
-----
The position is not zero based, but 1 based index.
Examples
--------
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s='ab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
def substring_index(str, delim, count):
"""
Returns the substring from string str before count occurrences of the delimiter delim.
If count is positive, everything to the left of the final delimiter (counting from the left) is
returned. If count is negative, everything to the right of the final delimiter (counting from the
right) is returned. substring_index performs a case-sensitive match when searching for delim.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
>>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
[Row(s='a.b')]
>>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
[Row(s='b.c.d')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count))
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
.. versionadded:: 1.5.0
Examples
--------
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. versionadded:: 1.5.0
Parameters
----------
substr : str
a string
str : :class:`~pyspark.sql.Column` or str
a Column of :class:`pyspark.sql.types.StringType`
pos : int, optional
start position (1-based index)
Notes
-----
The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
Examples
--------
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(locate('b', df.s, 1).alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
def lpad(col, len, pad):
"""
Left-pad the string column to width `len` with `pad`.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
[Row(s='##abcd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
def rpad(col, len, pad):
"""
Right-pad the string column to width `len` with `pad`.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
[Row(s='abcd##')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad))
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s='ababab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
def split(str, pattern, limit=-1):
"""
Splits str around matches of the given pattern.
.. versionadded:: 1.5.0
Parameters
----------
str : :class:`~pyspark.sql.Column` or str
a string expression to split
pattern : str
a string representing a regular expression. The regex string should be
a Java regular expression.
limit : int, optional
an integer which controls the number of times `pattern` is applied.
* ``limit > 0``: The resulting array's length will not be more than `limit`, and the
resulting array's last entry will contain all input beyond the last
matched pattern.
* ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
array can be of any size.
.. versionchanged:: 3.0
`split` now takes an optional `limit` field. If not provided, default limit value is -1.
Examples
--------
>>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
>>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
[Row(s=['one', 'twoBthreeC'])]
>>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
[Row(s=['one', 'two', 'three', ''])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
def regexp_extract(str, pattern, idx):
r"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d='100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
[Row(d='')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d='')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc)
def regexp_replace(str, pattern, replacement):
r"""Replace all substrings of the specified string value that match regexp with rep.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
[Row(d='-----')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc)
def initcap(col):
"""Translate the first letter of each word to upper case in the sentence.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
[Row(v='Ab Cd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.initcap(_to_java_column(col)))
def soundex(col):
"""
Returns the SoundEx encoding for a string.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
>>> df.select(soundex(df.name).alias("soundex")).collect()
[Row(soundex='P362'), Row(soundex='U612')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.soundex(_to_java_column(col)))
def bin(col):
"""Returns the string representation of the binary value of the given column.
.. versionadded:: 1.5.0
Examples
--------
>>> df.select(bin(df.age).alias('c')).collect()
[Row(c='10'), Row(c='101')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.bin(_to_java_column(col))
return Column(jc)
def hex(col):
"""Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
:class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
:class:`pyspark.sql.types.LongType`.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
[Row(hex(a)='414243', hex(b)='3')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hex(_to_java_column(col))
return Column(jc)
def unhex(col):
"""Inverse of hex. Interprets each pair of characters as a hexadecimal number
and converts to the byte representation of number.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
[Row(unhex(a)=bytearray(b'ABC'))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unhex(_to_java_column(col)))
def length(col):
"""Computes the character length of string data or number of bytes of binary data.
The length of character data includes the trailing spaces. The length of binary data
includes binary zeros.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
[Row(length=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.length(_to_java_column(col)))
def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
.. versionadded:: 1.5.0
Examples
--------
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r='1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
# ---------------------- Collection functions ------------------------------
def create_map(*cols):
"""Creates a new map column.
.. versionadded:: 2.0.0
Parameters
----------
cols : :class:`~pyspark.sql.Column` or str
column names or :class:`~pyspark.sql.Column`\\s that are
grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).
Examples
--------
>>> df.select(create_map('name', 'age').alias("map")).collect()
[Row(map={'Alice': 2}), Row(map={'Bob': 5})]
>>> df.select(create_map([df.name, df.age]).alias("map")).collect()
[Row(map={'Alice': 2}), Row(map={'Bob': 5})]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def map_from_arrays(col1, col2):
"""Creates a new map from two arrays.
.. versionadded:: 2.4.0
Parameters
----------
col1 : :class:`~pyspark.sql.Column` or str
name of column containing a set of keys. All elements should not be null
col2 : :class:`~pyspark.sql.Column` or str
name of column containing a set of values
Examples
--------
>>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
>>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
+----------------+
| map|
+----------------+
|{2 -> a, 5 -> b}|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2)))
def array(*cols):
"""Creates a new array column.
.. versionadded:: 1.4.0
Parameters
----------
cols : :class:`~pyspark.sql.Column` or str
column names or :class:`~pyspark.sql.Column`\\s that have
the same data type.
Examples
--------
>>> df.select(array('age', 'age').alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
>>> df.select(array([df.age, df.age]).alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def array_contains(col, value):
"""
Collection function: returns null if the array is null, true if the array contains the
given value, and false otherwise.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column containing array
value :
value or column to check for in array
Examples
--------
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(array_contains(df.data, "a")).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
>>> df.select(array_contains(df.data, lit("a"))).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
"""
sc = SparkContext._active_spark_context
value = value._jc if isinstance(value, Column) else value
return Column(sc._jvm.functions.array_contains(_to_java_column(col), value))
def arrays_overlap(a1, a2):
"""
Collection function: returns true if the arrays contain any common non-null element; if not,
returns null if both the arrays are non-empty and any of them contains a null element; returns
false otherwise.
.. versionadded:: 2.4.0
Examples
--------
>>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
>>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
[Row(overlap=True), Row(overlap=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2)))
def slice(x, start, length):
"""
Collection function: returns an array containing all the elements in `x` from index `start`
(array indices start at 1, or from the end if `start` is negative) with the specified `length`.
.. versionadded:: 2.4.0
Parameters
----------
x : :class:`~pyspark.sql.Column` or str
the array to be sliced
start : :class:`~pyspark.sql.Column` or int
the starting index
length : :class:`~pyspark.sql.Column` or int
the length of the slice
Examples
--------
>>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
>>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
[Row(sliced=[2, 3]), Row(sliced=[5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.slice(
_to_java_column(x),
start._jc if isinstance(start, Column) else start,
length._jc if isinstance(length, Column) else length
))
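# Editorial sketch (not part of the public API): `start` and `length` may also be Column
# expressions (the wrapper above converts them via `_jc`), so the slice bounds can come
# from other columns. Assumes an active SparkSession passed in as `spark`.
def _sketch_slice_with_columns(spark):
    df = spark.createDataFrame([([1, 2, 3], 2, 2)], ['x', 'start', 'n'])
    return df.select(slice(df.x, df.start, df.n).alias('sliced'))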
def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `col` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
.. versionadded:: 2.4.0
Examples
--------
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(array_join(df.data, ",").alias("joined")).collect()
[Row(joined='a,b,c'), Row(joined='a')]
>>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
[Row(joined='a,b,c'), Row(joined='a,NULL')]
"""
sc = SparkContext._active_spark_context
if null_replacement is None:
return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
else:
return Column(sc._jvm.functions.array_join(
_to_java_column(col), delimiter, null_replacement))
def concat(*cols):
"""
Concatenates multiple input columns together into a single column.
The function works with strings, binary and compatible array columns.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat(df.s, df.d).alias('s')).collect()
[Row(s='abcd123')]
>>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
>>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
[Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
def array_position(col, value):
"""
Collection function: Locates the position of the first occurrence of the given value
in the given array. Returns null if either of the arguments are null.
.. versionadded:: 2.4.0
Notes
-----
The position is not zero based, but 1 based index. Returns 0 if the given
value could not be found in the array.
Examples
--------
>>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
>>> df.select(array_position(df.data, "a")).collect()
[Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_position(_to_java_column(col), value))
def element_at(col, extraction):
"""
Collection function: Returns element of array at given index in extraction if col is array.
Returns value for the given key in extraction if col is map.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column containing array or map
extraction :
index to check for in array or key to check for in map
Notes
-----
The position is not zero based, but 1 based index.
Examples
--------
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(element_at(df.data, 1)).collect()
[Row(element_at(data, 1)='a'), Row(element_at(data, 1)=None)]
>>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
>>> df.select(element_at(df.data, lit("a"))).collect()
[Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.element_at(
_to_java_column(col), lit(extraction)._jc))
def array_remove(col, element):
"""
Collection function: Remove all elements that are equal to `element` from the given array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column containing array
element :
element to be removed from the array
Examples
--------
>>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
>>> df.select(array_remove(df.data, 1)).collect()
[Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_remove(_to_java_column(col), element))
def array_distinct(col):
"""
Collection function: removes duplicate values from the array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
>>> df.select(array_distinct(df.data)).collect()
[Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_distinct(_to_java_column(col)))
def array_intersect(col1, col2):
"""
Collection function: returns an array of the elements in the intersection of col1 and col2,
without duplicates.
.. versionadded:: 2.4.0
Parameters
----------
col1 : :class:`~pyspark.sql.Column` or str
name of column containing array
col2 : :class:`~pyspark.sql.Column` or str
name of column containing array
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_intersect(df.c1, df.c2)).collect()
[Row(array_intersect(c1, c2)=['a', 'c'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2)))
def array_union(col1, col2):
"""
Collection function: returns an array of the elements in the union of col1 and col2,
without duplicates.
.. versionadded:: 2.4.0
Parameters
----------
col1 : :class:`~pyspark.sql.Column` or str
name of column containing array
col2 : :class:`~pyspark.sql.Column` or str
name of column containing array
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_union(df.c1, df.c2)).collect()
[Row(array_union(c1, c2)=['b', 'a', 'c', 'd', 'f'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2)))
def array_except(col1, col2):
"""
Collection function: returns an array of the elements in col1 but not in col2,
without duplicates.
.. versionadded:: 2.4.0
Parameters
----------
col1 : :class:`~pyspark.sql.Column` or str
name of column containing array
col2 : :class:`~pyspark.sql.Column` or str
name of column containing array
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_except(df.c1, df.c2)).collect()
[Row(array_except(c1, c2)=['b'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2)))
def explode(col):
"""
Returns a new row for each element in the given array or map.
Uses the default column name `col` for elements in the array and
`key` and `value` for elements in the map unless specified otherwise.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc)
def posexplode(col):
"""
Returns a new row for each element with position in the given array or map.
Uses the default column name `pos` for position, and `col` for elements in the
array and `key` and `value` for elements in the map unless specified otherwise.
.. versionadded:: 2.1.0
Examples
--------
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(posexplode(eDF.intlist)).collect()
[Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]
>>> eDF.select(posexplode(eDF.mapfield)).show()
+---+---+-----+
|pos|key|value|
+---+---+-----+
| 0| a| b|
+---+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode(_to_java_column(col))
return Column(jc)
def explode_outer(col):
"""
Returns a new row for each element in the given array or map.
Unlike explode, if the array/map is null or empty then null is produced.
Uses the default column name `col` for elements in the array and
`key` and `value` for elements in the map unless specified otherwise.
.. versionadded:: 2.3.0
Examples
--------
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", explode_outer("a_map")).show()
+---+----------+----+-----+
| id| an_array| key|value|
+---+----------+----+-----+
| 1|[foo, bar]| x| 1.0|
| 2| []|null| null|
| 3| null|null| null|
+---+----------+----+-----+
>>> df.select("id", "a_map", explode_outer("an_array")).show()
+---+----------+----+
| id| a_map| col|
+---+----------+----+
| 1|{x -> 1.0}| foo|
| 1|{x -> 1.0}| bar|
| 2| {}|null|
| 3| null|null|
+---+----------+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode_outer(_to_java_column(col))
return Column(jc)
def posexplode_outer(col):
"""
Returns a new row for each element with position in the given array or map.
Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
Uses the default column name `pos` for position, and `col` for elements in the
array and `key` and `value` for elements in the map unless specified otherwise.
.. versionadded:: 2.3.0
Examples
--------
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", posexplode_outer("a_map")).show()
+---+----------+----+----+-----+
| id| an_array| pos| key|value|
+---+----------+----+----+-----+
| 1|[foo, bar]| 0| x| 1.0|
| 2| []|null|null| null|
| 3| null|null|null| null|
+---+----------+----+----+-----+
>>> df.select("id", "a_map", posexplode_outer("an_array")).show()
+---+----------+----+----+
| id| a_map| pos| col|
+---+----------+----+----+
| 1|{x -> 1.0}| 0| foo|
| 1|{x -> 1.0}| 1| bar|
| 2| {}|null|null|
| 3| null|null|null|
+---+----------+----+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode_outer(_to_java_column(col))
return Column(jc)
def get_json_object(col, path):
"""
Extracts json object from a json string based on json path specified, and returns json string
of the extracted json object. It will return null if the input json string is invalid.
.. versionadded:: 1.6.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
string column in json format
path : str
path to the json object to extract
Examples
--------
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
... get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
[Row(key='1', c0='value1', c1='value2'), Row(key='2', c0='value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
return Column(jc)
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
.. versionadded:: 1.6.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
string column in json format
fields : str
fields to extract
Examples
--------
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key='1', c0='value1', c1='value2'), Row(key='2', c0='value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc)
def from_json(col, schema, options=None):
"""
Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
keys, a :class:`StructType`, or an :class:`ArrayType` with
the specified schema. Returns `null` in the case of an unparseable string.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
string column in json format
schema : :class:`DataType` or str
a StructType or ArrayType of StructType to use when parsing the json column.
.. versionchanged:: 2.3
the DDL-formatted string is also supported for ``schema``.
options : dict, optional
options to control parsing. accepts the same options as the json datasource.
See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_
in the version you use.
.. # noqa
Examples
--------
>>> from pyspark.sql.types import *
>>> data = [(1, '''{"a": 1}''')]
>>> schema = StructType([StructField("a", IntegerType())])
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "a INT").alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
[Row(json={'a': 1})]
>>> data = [(1, '''[{"a": 1}]''')]
>>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[Row(a=1)])]
>>> schema = schema_of_json(lit('''{"a": 0}'''))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=None))]
>>> data = [(1, '''[1, 2, 3]''')]
>>> schema = ArrayType(IntegerType())
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[1, 2, 3])]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, DataType):
schema = schema.json()
elif isinstance(schema, Column):
schema = _to_java_column(schema)
jc = sc._jvm.functions.from_json(_to_java_column(col), schema, _options_to_str(options))
return Column(jc)
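# Editorial sketch (not part of the public API): the `options` mapping is forwarded to the
# JSON datasource, so parser flags such as 'allowUnquotedFieldNames' can be enabled when the
# input is not strictly valid JSON (the same option appears in the schema_of_json doctest
# below). Assumes an active SparkSession passed in as `spark`.
def _sketch_from_json_with_options(spark):
    df = spark.createDataFrame([(1, '{a: 1}')], ('key', 'value'))
    return df.select(
        from_json(df.value, 'a INT', {'allowUnquotedFieldNames': 'true'}).alias('json'))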
def to_json(col, options=None):
"""
Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
into a JSON string. Throws an exception, in the case of an unsupported type.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column containing a struct, an array or a map.
options : dict, optional
options to control converting. accepts the same options as the JSON datasource.
See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_
in the version you use.
Additionally the function supports the `pretty` option which enables
pretty JSON generation.
.. # noqa
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.sql.types import *
>>> data = [(1, Row(age=2, name='Alice'))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json='{"age":2,"name":"Alice"}')]
>>> data = [(1, [Row(age=2, name='Alice'), Row(age=3, name='Bob')])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json='[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
>>> data = [(1, {"name": "Alice"})]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json='{"name":"Alice"}')]
>>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json='[{"name":"Alice"},{"name":"Bob"}]')]
>>> data = [(1, ["Alice", "Bob"])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json='["Alice","Bob"]')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_json(_to_java_column(col), _options_to_str(options))
return Column(jc)
def schema_of_json(json, options=None):
"""
Parses a JSON string and infers its schema in DDL format.
.. versionadded:: 2.4.0
Parameters
----------
json : :class:`~pyspark.sql.Column` or str
a JSON string or a foldable string column containing a JSON string.
options : dict, optional
options to control parsing. accepts the same options as the JSON datasource.
See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_
in the version you use.
.. # noqa
.. versionchanged:: 3.0
It accepts `options` parameter to control schema inferring.
Examples
--------
>>> df = spark.range(1)
>>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
[Row(json='STRUCT<`a`: BIGINT>')]
>>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
>>> df.select(schema.alias("json")).collect()
[Row(json='STRUCT<`a`: BIGINT>')]
"""
if isinstance(json, str):
col = _create_column_from_literal(json)
elif isinstance(json, Column):
col = _to_java_column(json)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_json(col, _options_to_str(options))
return Column(jc)
def schema_of_csv(csv, options=None):
"""
Parses a CSV string and infers its schema in DDL format.
.. versionadded:: 3.0.0
Parameters
----------
csv : :class:`~pyspark.sql.Column` or str
a CSV string or a foldable string column containing a CSV string.
options : dict, optional
options to control parsing. accepts the same options as the CSV datasource.
See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_
in the version you use.
.. # noqa
Examples
--------
>>> df = spark.range(1)
>>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
[Row(csv='STRUCT<`_c0`: INT, `_c1`: STRING>')]
>>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
[Row(csv='STRUCT<`_c0`: INT, `_c1`: STRING>')]
"""
if isinstance(csv, str):
col = _create_column_from_literal(csv)
elif isinstance(csv, Column):
col = _to_java_column(csv)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_csv(col, _options_to_str(options))
return Column(jc)
def to_csv(col, options=None):
"""
Converts a column containing a :class:`StructType` into a CSV string.
Throws an exception, in the case of an unsupported type.
.. versionadded:: 3.0.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column containing a struct.
options : dict, optional
options to control converting. accepts the same options as the CSV datasource.
See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_
in the version you use.
.. # noqa
Examples
--------
>>> from pyspark.sql import Row
>>> data = [(1, Row(age=2, name='Alice'))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_csv(df.value).alias("csv")).collect()
[Row(csv='2,Alice')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_csv(_to_java_column(col), _options_to_str(options))
return Column(jc)
def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.size(_to_java_column(col)))
def array_min(col):
"""
Collection function: returns the minimum value of the array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_min(df.data).alias('min')).collect()
[Row(min=1), Row(min=-1)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_min(_to_java_column(col)))
def array_max(col):
"""
Collection function: returns the maximum value of the array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_max(df.data).alias('max')).collect()
[Row(max=3), Row(max=10)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_max(_to_java_column(col)))
def sort_array(col, asc=True):
"""
Collection function: sorts the input array in ascending or descending order according
to the natural ordering of the array elements. Null elements will be placed at the beginning
of the returned array in ascending order or at the end of the returned array in descending
order.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
asc : bool, optional
Examples
--------
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(sort_array(df.data).alias('r')).collect()
[Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
>>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
[Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
def array_sort(col):
"""
Collection function: sorts the input array in ascending order. The elements of the input array
must be orderable. Null elements will be placed at the end of the returned array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(array_sort(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_sort(_to_java_column(col)))
def shuffle(col):
"""
Collection function: Generates a random permutation of the given array.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Notes
-----
The function is non-deterministic.
Examples
--------
>>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
>>> df.select(shuffle(df.data).alias('s')).collect() # doctest: +SKIP
[Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shuffle(_to_java_column(col)))
def reverse(col):
"""
Collection function: returns a reversed string or an array with reverse order of elements.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
>>> df.select(reverse(df.data).alias('s')).collect()
[Row(s='LQS krapS')]
>>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
>>> df.select(reverse(df.data).alias('r')).collect()
[Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.reverse(_to_java_column(col)))
def flatten(col):
"""
Collection function: creates a single array from an array of arrays.
If a structure of nested arrays is deeper than two levels,
only one level of nesting is removed.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
>>> df.select(flatten(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.flatten(_to_java_column(col)))
def map_keys(col):
"""
Collection function: Returns an unordered array containing the keys of the map.
.. versionadded:: 2.3.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> from pyspark.sql.functions import map_keys
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_keys("data").alias("keys")).show()
+------+
| keys|
+------+
|[1, 2]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_keys(_to_java_column(col)))
def map_values(col):
"""
Collection function: Returns an unordered array containing the values of the map.
.. versionadded:: 2.3.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> from pyspark.sql.functions import map_values
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_values("data").alias("values")).show()
+------+
|values|
+------+
|[a, b]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_values(_to_java_column(col)))
def map_entries(col):
"""
Collection function: Returns an unordered array of all entries in the given map.
.. versionadded:: 3.0.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> from pyspark.sql.functions import map_entries
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_entries("data").alias("entries")).show()
+----------------+
| entries|
+----------------+
|[{1, a}, {2, b}]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_entries(_to_java_column(col)))
def map_from_entries(col):
"""
Collection function: Returns a map created from the given array of entries.
.. versionadded:: 2.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
Examples
--------
>>> from pyspark.sql.functions import map_from_entries
>>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
>>> df.select(map_from_entries("data").alias("map")).show()
+----------------+
| map|
+----------------+
|{1 -> a, 2 -> b}|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_entries(_to_java_column(col)))
def array_repeat(col, count):
"""
Collection function: creates an array containing a column repeated count times.
.. versionadded:: 2.4.0
Examples
--------
>>> df = spark.createDataFrame([('ab',)], ['data'])
>>> df.select(array_repeat(df.data, 3).alias('r')).collect()
[Row(r=['ab', 'ab', 'ab'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_repeat(
_to_java_column(col),
_to_java_column(count) if isinstance(count, Column) else count
))
def arrays_zip(*cols):
"""
Collection function: Returns a merged array of structs in which the N-th struct contains all
N-th values of input arrays.
.. versionadded:: 2.4.0
Parameters
----------
cols : :class:`~pyspark.sql.Column` or str
columns of arrays to be merged.
Examples
--------
>>> from pyspark.sql.functions import arrays_zip
>>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
>>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
[Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_zip(_to_seq(sc, cols, _to_java_column)))
def map_concat(*cols):
"""Returns the union of all the given maps.
.. versionadded:: 2.4.0
Parameters
----------
cols : :class:`~pyspark.sql.Column` or str
column names or :class:`~pyspark.sql.Column`\\s
Examples
--------
>>> from pyspark.sql.functions import map_concat
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c') as map2")
>>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
+------------------------+
|map3 |
+------------------------+
|{1 -> a, 2 -> b, 3 -> c}|
+------------------------+
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
return Column(jc)
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
    If `step` is not set, the sequence is incremented by 1 if `start` is less than or
    equal to `stop`, and by -1 otherwise.
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step)))
def from_csv(col, schema, options=None):
"""
Parses a column containing a CSV string to a row with the specified schema.
    Returns `null` in the case of an unparseable string.
.. versionadded:: 3.0.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
string column in CSV format
    schema : :class:`~pyspark.sql.Column` or str
a string with schema in DDL format to use when parsing the CSV column.
options : dict, optional
options to control parsing. accepts the same options as the CSV datasource.
See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_
in the version you use.
.. # noqa
Examples
--------
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
>>> data = [(" abc",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> options = {'ignoreLeadingWhiteSpace': True}
>>> df.select(from_csv(df.value, "s string", options).alias("csv")).collect()
[Row(csv=Row(s='abc'))]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, str):
schema = _create_column_from_literal(schema)
elif isinstance(schema, Column):
schema = _to_java_column(schema)
else:
raise TypeError("schema argument should be a column or string")
jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, _options_to_str(options))
return Column(jc)
def _unresolved_named_lambda_variable(*name_parts):
"""
Create `o.a.s.sql.expressions.UnresolvedNamedLambdaVariable`,
    convert it to o.a.s.sql.Column and wrap it in a Python `Column`
Parameters
----------
name_parts : str
"""
sc = SparkContext._active_spark_context
name_parts_seq = _to_seq(sc, name_parts)
expressions = sc._jvm.org.apache.spark.sql.catalyst.expressions
return Column(
sc._jvm.Column(
expressions.UnresolvedNamedLambdaVariable(name_parts_seq)
)
)
def _get_lambda_parameters(f):
import inspect
signature = inspect.signature(f)
parameters = signature.parameters.values()
# We should exclude functions that use
# variable args and keyword argnames
# as well as keyword only args
supported_parameter_types = {
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY,
}
# Validate that
# function arity is between 1 and 3
if not (1 <= len(parameters) <= 3):
raise ValueError(
"f should take between 1 and 3 arguments, but provided function takes {}".format(
len(parameters)
)
)
# and all arguments can be used as positional
if not all(p.kind in supported_parameter_types for p in parameters):
raise ValueError(
"f should use only POSITIONAL or POSITIONAL OR KEYWORD arguments"
)
return parameters
def _create_lambda(f):
"""
Create `o.a.s.sql.expressions.LambdaFunction` corresponding
to transformation described by f
    :param f: A Python function of one of the following forms:
- (Column) -> Column: ...
- (Column, Column) -> Column: ...
- (Column, Column, Column) -> Column: ...
"""
parameters = _get_lambda_parameters(f)
sc = SparkContext._active_spark_context
expressions = sc._jvm.org.apache.spark.sql.catalyst.expressions
argnames = ["x", "y", "z"]
args = [
_unresolved_named_lambda_variable(
expressions.UnresolvedNamedLambdaVariable.freshVarName(arg)
)
for arg in argnames[: len(parameters)]
]
result = f(*args)
if not isinstance(result, Column):
raise ValueError("f should return Column, got {}".format(type(result)))
jexpr = result._jc.expr()
jargs = _to_seq(sc, [arg._jc.expr() for arg in args])
return expressions.LambdaFunction(jexpr, jargs, False)
def _invoke_higher_order_function(name, cols, funs):
"""
Invokes expression identified by name,
    (relative to ``org.apache.spark.sql.catalyst.expressions``)
and wraps the result with Column (first Scala one, then Python).
:param name: Name of the expression
:param cols: a list of columns
    :param funs: a list of ``(*Column) -> Column`` functions.
:return: a Column
"""
sc = SparkContext._active_spark_context
expressions = sc._jvm.org.apache.spark.sql.catalyst.expressions
expr = getattr(expressions, name)
jcols = [_to_java_column(col).expr() for col in cols]
jfuns = [_create_lambda(f) for f in funs]
return Column(sc._jvm.Column(expr(*jcols + jfuns)))
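# Illustrative sketch (not part of the module): how the three private helpers above
# fit together. A user-facing higher-order function validates the Python callable
# with _get_lambda_parameters, turns it into a Catalyst LambdaFunction via
# _create_lambda, and hands it to the named Catalyst expression through
# _invoke_higher_order_function. The helper below is hypothetical and assumes an
# active SparkSession bound to ``spark``; it only traces the call path.
def _sketch_lambda_plumbing(spark):
    """Hypothetical example, for illustration only."""
    df = spark.createDataFrame([(1, [1, 2, 3])], ("id", "xs"))
    # ``lambda x: x + 1`` has arity 1 with positional parameters, so it passes
    # _get_lambda_parameters; "ArrayTransform" is the Catalyst expression invoked
    # by the public ``transform`` function defined below.
    return df.select(transform("xs", lambda x: x + 1).alias("xs_plus_one"))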
def transform(col, f):
"""
Returns an array of elements after applying a transformation to each element in the input array.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
f : function
a function that is applied to each element of the input array.
Can take one of the following forms:
- Unary ``(x: Column) -> Column: ...``
- Binary ``(x: Column, i: Column) -> Column...``, where the second argument is
a 0-based index of the element.
and can use methods of :class:`~pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`~pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, [1, 2, 3, 4])], ("key", "values"))
>>> df.select(transform("values", lambda x: x * 2).alias("doubled")).show()
+------------+
| doubled|
+------------+
|[2, 4, 6, 8]|
+------------+
>>> def alternate(x, i):
... return when(i % 2 == 0, x).otherwise(-x)
>>> df.select(transform("values", alternate).alias("alternated")).show()
+--------------+
| alternated|
+--------------+
|[1, -2, 3, -4]|
+--------------+
"""
return _invoke_higher_order_function("ArrayTransform", [col], [f])
def exists(col, f):
"""
Returns whether a predicate holds for one or more elements in the array.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
f : function
``(x: Column) -> Column: ...`` returning the Boolean expression.
Can use methods of :class:`~pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
    Returns
    -------
    :class:`~pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, [1, 2, 3, 4]), (2, [3, -1, 0])],("key", "values"))
>>> df.select(exists("values", lambda x: x < 0).alias("any_negative")).show()
+------------+
|any_negative|
+------------+
| false|
| true|
+------------+
"""
return _invoke_higher_order_function("ArrayExists", [col], [f])
def forall(col, f):
"""
Returns whether a predicate holds for every element in the array.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
f : function
``(x: Column) -> Column: ...`` returning the Boolean expression.
Can use methods of :class:`~pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`~pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame(
... [(1, ["bar"]), (2, ["foo", "bar"]), (3, ["foobar", "foo"])],
... ("key", "values")
... )
>>> df.select(forall("values", lambda x: x.rlike("foo")).alias("all_foo")).show()
+-------+
|all_foo|
+-------+
| false|
| false|
| true|
+-------+
"""
return _invoke_higher_order_function("ArrayForAll", [col], [f])
def filter(col, f):
"""
Returns an array of elements for which a predicate holds in a given array.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
f : function
A function that returns the Boolean expression.
Can take one of the following forms:
- Unary ``(x: Column) -> Column: ...``
- Binary ``(x: Column, i: Column) -> Column...``, where the second argument is
a 0-based index of the element.
and can use methods of :class:`~pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`~pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame(
... [(1, ["2018-09-20", "2019-02-03", "2019-07-01", "2020-06-01"])],
... ("key", "values")
... )
>>> def after_second_quarter(x):
... return month(to_date(x)) > 6
>>> df.select(
... filter("values", after_second_quarter).alias("after_second_quarter")
... ).show(truncate=False)
+------------------------+
|after_second_quarter |
+------------------------+
|[2018-09-20, 2019-07-01]|
+------------------------+
"""
return _invoke_higher_order_function("ArrayFilter", [col], [f])
def aggregate(col, initialValue, merge, finish=None):
"""
Applies a binary operator to an initial state and all elements in the array,
and reduces this to a single state. The final state is converted into the final result
by applying a finish function.
Both functions can use methods of :class:`~pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
initialValue : :class:`~pyspark.sql.Column` or str
initial value. Name of column or expression
merge : function
        a binary function ``(acc: Column, x: Column) -> Column...`` returning an expression
        of the same type as ``initialValue``
finish : function
an optional unary function ``(x: Column) -> Column: ...``
used to convert accumulated value.
Returns
-------
:class:`~pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, [20.0, 4.0, 2.0, 6.0, 10.0])], ("id", "values"))
>>> df.select(aggregate("values", lit(0.0), lambda acc, x: acc + x).alias("sum")).show()
+----+
| sum|
+----+
|42.0|
+----+
>>> def merge(acc, x):
... count = acc.count + 1
... sum = acc.sum + x
... return struct(count.alias("count"), sum.alias("sum"))
>>> df.select(
... aggregate(
... "values",
... struct(lit(0).alias("count"), lit(0.0).alias("sum")),
... merge,
... lambda acc: acc.sum / acc.count,
... ).alias("mean")
... ).show()
+----+
|mean|
+----+
| 8.4|
+----+
"""
if finish is not None:
return _invoke_higher_order_function(
"ArrayAggregate",
[col, initialValue],
[merge, finish]
)
else:
return _invoke_higher_order_function(
"ArrayAggregate",
[col, initialValue],
[merge]
)
def zip_with(left, right, f):
"""
Merge two given arrays, element-wise, into a single array using a function.
If one array is shorter, nulls are appended at the end to match the length of the longer
array, before applying the function.
.. versionadded:: 3.1.0
Parameters
----------
left : :class:`~pyspark.sql.Column` or str
name of the first column or expression
right : :class:`~pyspark.sql.Column` or str
name of the second column or expression
f : function
a binary function ``(x1: Column, x2: Column) -> Column...``
Can use methods of :class:`~pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`~pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, [1, 3, 5, 8], [0, 2, 4, 6])], ("id", "xs", "ys"))
>>> df.select(zip_with("xs", "ys", lambda x, y: x ** y).alias("powers")).show(truncate=False)
+---------------------------+
|powers |
+---------------------------+
|[1.0, 9.0, 625.0, 262144.0]|
+---------------------------+
>>> df = spark.createDataFrame([(1, ["foo", "bar"], [1, 2, 3])], ("id", "xs", "ys"))
>>> df.select(zip_with("xs", "ys", lambda x, y: concat_ws("_", x, y)).alias("xs_ys")).show()
+-----------------+
| xs_ys|
+-----------------+
|[foo_1, bar_2, 3]|
+-----------------+
"""
return _invoke_higher_order_function("ZipWith", [left, right], [f])
def transform_keys(col, f):
"""
Applies a function to every key-value pair in a map and returns
a map with the results of those applications as the new keys for the pairs.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
f : function
a binary function ``(k: Column, v: Column) -> Column...``
Can use methods of :class:`~pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`~pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, {"foo": -2.0, "bar": 2.0})], ("id", "data"))
>>> df.select(transform_keys(
... "data", lambda k, _: upper(k)).alias("data_upper")
... ).show(truncate=False)
+-------------------------+
|data_upper |
+-------------------------+
|{BAR -> 2.0, FOO -> -2.0}|
+-------------------------+
"""
return _invoke_higher_order_function("TransformKeys", [col], [f])
def transform_values(col, f):
"""
Applies a function to every key-value pair in a map and returns
a map with the results of those applications as the new values for the pairs.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
f : function
a binary function ``(k: Column, v: Column) -> Column...``
Can use methods of :class:`~pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`~pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, {"IT": 10.0, "SALES": 2.0, "OPS": 24.0})], ("id", "data"))
>>> df.select(transform_values(
... "data", lambda k, v: when(k.isin("IT", "OPS"), v + 10.0).otherwise(v)
... ).alias("new_data")).show(truncate=False)
+---------------------------------------+
|new_data |
+---------------------------------------+
|{OPS -> 34.0, IT -> 20.0, SALES -> 2.0}|
+---------------------------------------+
"""
return _invoke_higher_order_function("TransformValues", [col], [f])
def map_filter(col, f):
"""
Returns a map whose key-value pairs satisfy a predicate.
.. versionadded:: 3.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
name of column or expression
f : function
a binary function ``(k: Column, v: Column) -> Column...``
Can use methods of :class:`~pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`~pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([(1, {"foo": 42.0, "bar": 1.0, "baz": 32.0})], ("id", "data"))
>>> df.select(map_filter(
... "data", lambda _, v: v > 30.0).alias("data_filtered")
... ).show(truncate=False)
+--------------------------+
|data_filtered |
+--------------------------+
|{baz -> 32.0, foo -> 42.0}|
+--------------------------+
"""
return _invoke_higher_order_function("MapFilter", [col], [f])
def map_zip_with(col1, col2, f):
"""
Merge two given maps, key-wise into a single map using a function.
.. versionadded:: 3.1.0
Parameters
----------
col1 : :class:`~pyspark.sql.Column` or str
name of the first column or expression
col2 : :class:`~pyspark.sql.Column` or str
name of the second column or expression
f : function
a ternary function ``(k: Column, v1: Column, v2: Column) -> Column...``
Can use methods of :class:`~pyspark.sql.Column`, functions defined in
:py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
Python ``UserDefinedFunctions`` are not supported
(`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).
Returns
-------
:class:`~pyspark.sql.Column`
Examples
--------
>>> df = spark.createDataFrame([
... (1, {"IT": 24.0, "SALES": 12.00}, {"IT": 2.0, "SALES": 1.4})],
... ("id", "base", "ratio")
... )
>>> df.select(map_zip_with(
... "base", "ratio", lambda k, v1, v2: round(v1 * v2, 2)).alias("updated_data")
... ).show(truncate=False)
+---------------------------+
|updated_data |
+---------------------------+
|{SALES -> 16.8, IT -> 48.0}|
+---------------------------+
"""
return _invoke_higher_order_function("MapZipWith", [col1, col2], [f])
# ---------------------- Partition transform functions --------------------------------
def years(col):
"""
Partition transform function: A transform for timestamps and dates
to partition data into years.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP
... years("ts")
... ).createOrReplace()
Notes
-----
This function can be used only in combination with
:py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
method of the `DataFrameWriterV2`.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.years(_to_java_column(col)))
def months(col):
"""
Partition transform function: A transform for timestamps and dates
to partition data into months.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").partitionedBy(
... months("ts")
... ).createOrReplace() # doctest: +SKIP
Notes
-----
This function can be used only in combination with
:py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
method of the `DataFrameWriterV2`.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months(_to_java_column(col)))
def days(col):
"""
Partition transform function: A transform for timestamps and dates
to partition data into days.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP
... days("ts")
... ).createOrReplace()
Notes
-----
This function can be used only in combination with
:py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
method of the `DataFrameWriterV2`.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.days(_to_java_column(col)))
def hours(col):
"""
Partition transform function: A transform for timestamps
to partition data into hours.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP
... hours("ts")
... ).createOrReplace()
Notes
-----
This function can be used only in combination with
:py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
method of the `DataFrameWriterV2`.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hours(_to_java_column(col)))
def bucket(numBuckets, col):
"""
Partition transform function: A transform for any type that partitions
by a hash of the input column.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP
... bucket(42, "ts")
... ).createOrReplace()
Notes
-----
This function can be used only in combination with
:py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
method of the `DataFrameWriterV2`.
"""
if not isinstance(numBuckets, (int, Column)):
raise TypeError(
"numBuckets should be a Column or an int, got {}".format(type(numBuckets))
)
sc = SparkContext._active_spark_context
numBuckets = (
_create_column_from_literal(numBuckets)
if isinstance(numBuckets, int)
else _to_java_column(numBuckets)
)
return Column(sc._jvm.functions.bucket(numBuckets, _to_java_column(col)))
# ---------------------------- User Defined Function ----------------------------------
def udf(f=None, returnType=StringType()):
"""Creates a user defined function (UDF).
.. versionadded:: 1.3.0
Parameters
----------
f : function
python function if used as a standalone function
returnType : :class:`pyspark.sql.types.DataType` or str
the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
Notes
-----
The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
    The user-defined functions do not support conditional expressions or short circuiting
    in boolean expressions; all sub-expressions end up being evaluated internally. If the
    functions can fail on special rows, the workaround is to incorporate the condition into
    the functions.
The user-defined functions do not take keyword arguments on the calling side.
"""
# The following table shows most of Python data and SQL type conversions in normal UDFs that
    # are not yet visible to the user. Some of the behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-28131's PR to see the codes in order to generate the table below.
#
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
# |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)| a(str)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)|bytearray(b'ABC')(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
# | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | tinyint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | smallint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | int| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | bigint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | string| None| 'true'| '1'| 'a'|'java.util.Gregor...| 'java.util.Gregor...| '1.0'| '[I@66cbb73a'| '[1]'|'[Ljava.lang.Obje...| '[B@5a51eb1a'| '1'| '{a=1}'| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa
# | float| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | double| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | array<int>| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa
# | binary| None| None| None|bytearray(b'a')| None| None| None| None| None| None| bytearray(b'ABC')| None| None| X| X| # noqa
# | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa
# | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| {'a': 1}| X| X| # noqa
# | struct<_1:int>| None| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: 'X' means it throws an exception during the conversion.
# Note: Python 3.7.3 is used.
# decorator @udf, @udf(), @udf(dataType())
if f is None or isinstance(f, (str, DataType)):
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF)
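# Illustrative sketch (not part of the module): beyond the doctest examples above,
# ``returnType`` may also be given as a DDL-formatted type string, and a UDF can be
# marked non-deterministic so the optimizer will not duplicate or elide its calls.
# The helper below is hypothetical and assumes an active SparkSession ``spark``.
def _sketch_udf_usage(spark):
    """Hypothetical example, for illustration only."""
    import random
    from pyspark.sql.types import IntegerType
    add_one = udf(lambda x: x + 1 if x is not None else None, "int")  # DDL type string
    noise = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
    df = spark.createDataFrame([(1,), (2,)], ("x",))
    return df.select(add_one("x").alias("x_plus_one"), noise().alias("noise"))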
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.functions
globs = pyspark.sql.functions.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.functions tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.createDataFrame([Row(age=2, name='Alice'), Row(age=5, name='Bob')])
(failure_count, test_count) = doctest.testmod(
pyspark.sql.functions, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
Charence/stk-code | tools/batch.py | 16 | 3189 | from matplotlib import pyplot
from os import listdir
def is_numeric(x):
try:
float(x)
except ValueError:
return False
return True
avg_lap_time = {}
avg_pos = {}
avg_speed = {}
avg_top = {}
total_rescued = {}
tests = len(listdir('../../batch'))-1
for file in listdir('../../batch'):
if (file == '.DS_Store'):
continue
f = open('../../batch/'+file,'r')
'''
name_index = file.find('.')
kart_name = str(file[:name_index])
first = file.find('.',name_index+1)
track_name = file[name_index+1:first]
second = file.find('.',first+1)
run = int(file[first+1:second])
'''
track_name = "snowmountain"
kart_names = ["gnu", "sara", "tux", "elephpant"]
if track_name == "snowmountain":
contents = f.readlines()
'''
contents = contents[2:contents.index("[debug ] profile: \n")-1]
content = [s for s in contents if kart_name in s]
data = [float(x) for x in content[0].split() if is_numeric(x)]
if kart_name not in avg_lap_time:
avg_lap_time[kart_name] = []
avg_pos[kart_name] = []
avg_speed[kart_name] = []
avg_top[kart_name] = []
total_rescued[kart_name] = []
avg_lap_time[kart_name].append(data[2]/4)
avg_pos[kart_name].append(data[1])
avg_speed[kart_name].append(data[3])
avg_top[kart_name].append(data[4])
total_rescued[kart_name].append(data[7])
'''
contents = contents[2:6] #TODO check if all is in here
for kart in kart_names:
content = [s for s in contents if kart in s]
data = [float(x) for x in content[0].split() if is_numeric(x)]
if kart not in avg_lap_time:
avg_lap_time[kart] = []
avg_pos[kart] = []
avg_speed[kart] = []
avg_top[kart] = []
total_rescued[kart] = []
avg_lap_time[kart].append(data[2]/4)
avg_pos[kart].append(data[1])
avg_speed[kart].append(data[3])
avg_top[kart].append(data[4])
total_rescued[kart].append(data[7])
tests = len(avg_lap_time["gnu"])
print total_rescued
for kart in kart_names:
print "rescues for ", kart , ": ", sum(total_rescued[kart])/tests
print "avg_lap_time for " , kart , ": " , sum(avg_lap_time[kart])/tests
print "avg_pos for " , kart , ": " , sum(avg_pos[kart])/tests
print "avg_speed for " , kart , ": " , sum(avg_speed[kart])/tests
print "avg_top for " , kart , ": " , sum(avg_top[kart])/tests
pyplot.subplot(2,2,1)
pyplot.plot(list(xrange(tests)),avg_pos["gnu"], "b-")
pyplot.xlabel("tests")
pyplot.ylabel("gnu")
pyplot.subplot(2,2,2)
pyplot.plot(list(xrange(tests)),avg_pos["sara"], "r-")
pyplot.xlabel("tests")
pyplot.ylabel("sara")
pyplot.subplot(2,2,3)
pyplot.plot(list(xrange(tests)),avg_pos["elephpant"], "y-")
pyplot.xlabel("tests")
pyplot.ylabel("elephpant")
pyplot.subplot(2,2,4)
pyplot.plot(list(xrange(tests)),avg_pos["tux"], "g-")
pyplot.xlabel("tests")
pyplot.ylabel("tux")
pyplot.show()
| gpl-3.0 |
belltailjp/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
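        # (Woodbury identity, for reference:
        #  (A + U C V)^{-1} = A^{-1} - A^{-1} U (C^{-1} + V A^{-1} U)^{-1} V A^{-1};
        #  loosely, A is the isotropic noise term and U, C, V are built from the
        #  components and their explained variances.)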
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
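# Illustrative sketch (not part of the module): a quick numerical check of the
# contracts documented above, using the concrete PCA subclass. get_precision()
# should invert get_covariance(), and inverse_transform(transform(X)) should
# reconstruct X up to the discarded components. The helper name is hypothetical.
def _sketch_base_pca_contracts():
    """Hypothetical example, for illustration only."""
    import numpy as np
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    pca = PCA(n_components=3).fit(X)
    cov, prec = pca.get_covariance(), pca.get_precision()
    # The precision matrix is the inverse of the model covariance.
    assert np.allclose(np.dot(cov, prec), np.eye(5), atol=1e-6)
    # Round-tripping loses only the variance of the two discarded components.
    X_rec = pca.inverse_transform(pca.transform(X))
    assert X_rec.shape == X.shape
    return cov, prec, X_rec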
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
caseyclements/bokeh | bokeh/compat/mplexporter/exporter.py | 32 | 12403 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
        Parameters
        ----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
if fig.canvas is None:
fig.canvas = FigureCanvasAgg(fig)
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not impemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not impemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
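# Illustrative sketch (not part of the module): the smallest renderer-like object
# the Exporter can crawl. The real mplexporter Renderer base class provides richer
# hooks; this stub only implements the calls issued by the crawl methods above and
# records what it sees. All names in the stub are hypothetical.
def _sketch_minimal_renderer():
    """Hypothetical example, for illustration only."""
    from contextlib import contextmanager
    import matplotlib.pyplot as plt
    class LoggingRenderer(object):
        def __init__(self):
            self.calls = []
        @contextmanager
        def draw_figure(self, fig, props):
            self.calls.append("figure")
            yield
        @contextmanager
        def draw_axes(self, ax, props):
            self.calls.append("axes")
            yield
        @contextmanager
        def draw_legend(self, legend, props):
            self.calls.append("legend")
            yield
        def draw_marked_line(self, **kwargs):
            self.calls.append("line")
        def draw_text(self, **kwargs):
            self.calls.append("text")
        def draw_path(self, **kwargs):
            self.calls.append("path")
        def draw_path_collection(self, **kwargs):
            self.calls.append("collection")
        def draw_image(self, **kwargs):
            self.calls.append("image")
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1], marker="o", label="diagonal")
    ax.set_title("demo")
    renderer = LoggingRenderer()
    Exporter(renderer).run(fig)
    return renderer.calls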
| bsd-3-clause |
harlowja/networkx | examples/drawing/knuth_miles.py | 50 | 2994 | #!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance betwen the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
def miles_graph():
""" Return the cites example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
    import gzip
    import re
fh = gzip.open('knuth_miles.txt.gz','r')
G=nx.Graph()
G.position={}
G.population={}
cities=[]
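    # Format assumed by the parsing below: a city line looks like
    # "City, ST[lat,long]population"; the numeric lines that follow it list the
    # distances from that city to every previously listed city.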
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
        numfind=re.compile(r"^\d+")
if numfind.match(line): # this line is distances
dist=line.split()
for d in dist:
G.add_edge(city,cities[i],weight=int(d))
i=i+1
else: # this line is a city, position, population
i=1
(city,coordpop)=line.split("[")
cities.insert(0,city)
(coord,pop)=coordpop.split("]")
(y,x)=coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city]=(-int(x)+7500,int(y)-3000)
G.population[city]=float(pop)/1000.0
return G
if __name__ == '__main__':
import networkx as nx
import re
import sys
G=miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print("digraph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
# make new graph of cites, edge if less then 300 miles between them
H=nx.Graph()
for v in G:
H.add_node(v)
for (u,v,d) in G.edges(data=True):
if d['weight'] < 300:
H.add_edge(u,v)
# draw with matplotlib/pylab
try:
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# with nodes colored by degree sized by population
node_color=[float(H.degree(v)) for v in H]
nx.draw(H,G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False)
# scale the axes equally
plt.xlim(-5000,500)
plt.ylim(-2000,3500)
plt.savefig("knuth_miles.png")
except:
pass
| bsd-3-clause |
jblackburne/scikit-learn | sklearn/neural_network/rbm.py | 46 | 12291 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
        self._fit(X, self.random_state_)
        return self
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
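A minimal, hedged usage sketch for the estimator above, relying only on the public fit/transform/gibbs/score_samples methods documented in its docstrings; the toy data and hyperparameter values are illustrative and not part of the original file.

import numpy as np
from sklearn.neural_network import BernoulliRBM

X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
rbm = BernoulliRBM(n_components=2, learning_rate=0.05, n_iter=20, random_state=0)
rbm.fit(X)
hidden = rbm.transform(X)    # P(h=1|v) for every sample
v_step = rbm.gibbs(X)        # one Gibbs sampling step starting from the data
pl = rbm.score_samples(X)    # stochastic pseudo-likelihood proxy
print(hidden.shape, v_step.shape, pl.shape)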
abonil91/ncanda-data-integration | scripts/redcap/scoring/ctq/__init__.py | 1 | 3092 | #!/usr/bin/env python
##
## Copyright 2016 SRI International
## See COPYING file distributed along with the package for the copyright and license terms.
##
import pandas
import Rwrapper
#
# Variables from surveys needed for CTQ
#
# LimeSurvey field names
lime_fields = [ "ctq_set1 [ctq1]", "ctq_set1 [ctq2]", "ctq_set1 [ctq3]", "ctq_set1 [ctq4]", "ctq_set1 [ctq5]", "ctq_set1 [ctq6]", "ctq_set1 [ctq7]", "ctq_set2 [ctq8]", "ctq_set2 [ctq9]", "ctq_set2 [ct10]", "ctq_set2 [ct11]",
"ctq_set2 [ct12]", "ctq_set2 [ct13]", "ctq_set2 [ct14]", "ctq_set3 [ctq15]", "ctq_set3 [ctq16]", "ctq_set3 [ctq17]", "ctq_set3 [ctq18]", "ctq_set3 [ctq19]", "ctq_set3 [ctq20]", "ctq_set3 [ctq21]",
"ctq_set4 [ctq22]", "ctq_set4 [ctq23]", "ctq_set4 [ctq24]", "ctq_set4 [ctq25]", "ctq_set4 [ctq26]", "ctq_set4 [ctq27]", "ctq_set4 [ctq28]" ]
# Dictionary to recover LimeSurvey field names from REDCap names
rc2lime = dict()
for field in lime_fields:
rc2lime[Rwrapper.label_to_sri( 'youthreport2', field )] = field
# REDCap fields names
input_fields = { 'mrireport' : [ 'youth_report_2_complete', 'youthreport2_missing' ] + rc2lime.keys() }
#
# This determines the name of the form in REDCap where the results are posted.
#
output_form = 'clinical'
#
# CTQ field names mapping from R to REDCap
#
R2rc = { 'Emotional Abuse Scale Total Score' : 'ctq_ea',
'Physical Abuse Scale Total Score' : 'ctq_pa',
'Sexual Abuse Scale Total Score' : 'ctq_sa',
'Emotional Neglect Scale Total Score' : 'ctq_en',
'Physical Neglect Scale Total Score' : 'ctq_pn',
'Minimization/Denial Scale Total Score' : 'ctq_minds' }
#
# Scoring function - take requested data (as requested by "input_fields") for each (subject,event), and demographics (date of birth, gender) for each subject.
#
def compute_scores( data, demographics ):
# Get rid of all records that don't have YR2
    data = data.dropna( subset=['youth_report_2_complete'] )
data = data[ data['youth_report_2_complete'] > 0 ]
data = data[ ~(data['youthreport2_missing'] > 0) ]
# If no records to score, return empty DF
if len( data ) == 0:
return pandas.DataFrame()
# Replace all column labels with the original LimeSurvey names
data.columns = Rwrapper.map_labels( data.columns, rc2lime )
# Call the scoring function for all table rows
scores = data.apply( Rwrapper.runscript, axis=1, Rscript='ctq/CTQ.R', scores_key='CTQ.ary' )
# Replace all score columns with REDCap field names
scores.columns = Rwrapper.map_labels( scores.columns, R2rc )
# Simply copy completion status from the input surveys
scores['ctq_complete'] = data['youth_report_2_complete'].map( int )
# Make a proper multi-index for the scores table
scores.index = pandas.MultiIndex.from_tuples(scores.index)
scores.index.names = ['study_id', 'redcap_event_name']
# Return the computed scores - this is what will be imported back into REDCap
outfield_list = [ 'ctq_complete' ] + R2rc.values()
return scores[ outfield_list ]
| bsd-3-clause |
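A small, hedged pandas illustration of the MultiIndex step used in compute_scores above; the subject IDs, event names and score values are invented for demonstration.

import pandas

scores = pandas.DataFrame({'ctq_ea': [7, 5], 'ctq_complete': [2, 2]},
                          index=[('S001', 'baseline'), ('S002', 'baseline')])
scores.index = pandas.MultiIndex.from_tuples(scores.index)
scores.index.names = ['study_id', 'redcap_event_name']
print(scores.loc[('S001', 'baseline')])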
jorik041/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
    Randomized Lasso works by resampling the training data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the training data and
    computing a LogisticRegression on each resampling. In short, the features
    selected more often are good features. It is also known as stability
    selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
                         verbose=False):
    """Stability path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
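A hedged usage sketch for the stability-selection utilities defined above; the synthetic regression data and the resampling settings are illustrative only.

import numpy as np
from sklearn.linear_model import RandomizedLasso, lasso_stability_path

rng = np.random.RandomState(0)
X = rng.randn(100, 10)
y = X[:, 0] + 0.5 * X[:, 1] + 0.1 * rng.randn(100)

rlasso = RandomizedLasso(alpha='aic', n_resampling=50, random_state=0)
rlasso.fit(X, y)
print(rlasso.scores_)                    # selection frequency per feature
print(rlasso.get_support(indices=True))  # features above selection_threshold

alphas_grid, scores_path = lasso_stability_path(X, y, n_resampling=50,
                                                random_state=0)
print(alphas_grid.shape, scores_path.shape)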
jingxiang-li/kaggle-yelp | model/level3_model_rf.py | 1 | 5669 | from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import f1_score
import argparse
from os import path
import os
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from utils import *
import pickle
np.random.seed(54568464)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--yix', type=int, default=0)
return parser.parse_args()
# functions for hyperparameters optimization
class Score:
def __init__(self, X, y):
self.y = y
self.X = X
def get_score(self, params):
        params['n_estimators'] = int(params['n_estimators'])
        params['max_depth'] = int(params['max_depth'])
        params['min_samples_split'] = int(params['min_samples_split'])
        params['min_samples_leaf'] = int(params['min_samples_leaf'])
print('Training with params:')
print(params)
# cross validation here
scores = []
for train_ix, test_ix in makeKFold(5, self.y, 1):
X_train, y_train = self.X[train_ix, :], self.y[train_ix]
X_test, y_test = self.X[test_ix, :], self.y[test_ix]
weight = y_train.shape[0] / (2 * np.bincount(y_train))
sample_weight = np.array([weight[i] for i in y_train])
clf = RandomForestClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y_train, 1))
cclf.fit(X_train, y_train, sample_weight)
pred = cclf.predict(X_test)
scores.append(f1_score(y_true=y_test, y_pred=pred))
print(scores)
score = np.mean(scores)
print(score)
return {'loss': -score, 'status': STATUS_OK}
def optimize(trials, X, y, max_evals):
space = {
'n_estimators': hp.quniform('n_estimators', 100, 500, 50),
'criterion': hp.choice('criterion', ['gini', 'entropy']),
'max_depth': hp.quniform('max_depth', 1, 7, 1),
'min_samples_split': hp.quniform('min_samples_split', 1, 9, 2),
'min_samples_leaf': hp.quniform('min_samples_leaf', 1, 5, 1),
'bootstrap': True,
'oob_score': True,
'n_jobs': -1
}
s = Score(X, y)
best = fmin(s.get_score,
space,
algo=tpe.suggest,
trials=trials,
max_evals=max_evals
)
    best['n_estimators'] = int(best['n_estimators'])
    best['max_depth'] = int(best['max_depth'])
    best['min_samples_split'] = int(best['min_samples_split'])
    best['min_samples_leaf'] = int(best['min_samples_leaf'])
best['criterion'] = ['gini', 'entropy'][best['criterion']]
best['bootstrap'] = True
best['oob_score'] = True
best['n_jobs'] = -1
del s
return best
def out_fold_pred(params, X, y):
# cross validation here
preds = np.zeros((y.shape[0]))
for train_ix, test_ix in makeKFold(5, y, 1):
X_train, y_train = X[train_ix, :], y[train_ix]
X_test = X[test_ix, :]
weight = y_train.shape[0] / (2 * np.bincount(y_train))
sample_weight = np.array([weight[i] for i in y_train])
clf = RandomForestClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y_train, 1))
cclf.fit(X_train, y_train, sample_weight)
pred = cclf.predict_proba(X_test)[:, 1]
preds[test_ix] = pred
return preds
def get_model(params, X, y):
clf = RandomForestClassifier(**params)
cclf = CalibratedClassifierCV(base_estimator=clf,
method='isotonic',
cv=makeKFold(3, y, 1))
weight = y.shape[0] / (2 * np.bincount(y))
sample_weight = np.array([weight[i] for i in y])
cclf.fit(X, y, sample_weight)
return cclf
args = parse_args()
data_dir = '../level3-feature/' + str(args.yix)
X_train = np.load(path.join(data_dir, 'X_train.npy'))
X_test = np.load(path.join(data_dir, 'X_test.npy'))
y_train = np.load(path.join(data_dir, 'y_train.npy'))
print(X_train.shape, X_test.shape, y_train.shape)
X_train_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_train_ext.npy')
X_test_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_test_ext.npy')
print(X_train_ext.shape, X_test_ext.shape)
X_train = np.hstack((X_train, X_train_ext))
X_test = np.hstack((X_test, X_test_ext))
print('Add Extra')
print(X_train.shape, X_test.shape, y_train.shape)
# Now we have X_train, X_test, y_train
trials = Trials()
params = optimize(trials, X_train, y_train, 50)
out_fold = out_fold_pred(params, X_train, y_train)
clf = get_model(params, X_train, y_train)
preds = clf.predict_proba(X_test)[:, 1]
save_dir = '../level3-model-final/' + str(args.yix)
print(save_dir)
if not path.exists(save_dir):
os.makedirs(save_dir)
# save model, parameter, outFold_pred, pred
with open(path.join(save_dir, 'model_rf.pkl'), 'wb') as f_model:
pickle.dump(clf.calibrated_classifiers_, f_model)
with open(path.join(save_dir, 'param_rf.pkl'), 'wb') as f_param:
pickle.dump(params, f_param)
np.save(path.join(save_dir, 'pred_rf.npy'), preds)
np.save(path.join(save_dir, 'outFold_rf.npy'), out_fold)
| mit |
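A hedged, self-contained sketch of the calibrated random-forest pattern used in get_model above, on a synthetic binary problem; the hyperparameter values are placeholders rather than the tuned values produced by hyperopt.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV

X_demo, y_demo = make_classification(n_samples=200, n_features=10, random_state=0)
weight = y_demo.shape[0] / (2.0 * np.bincount(y_demo))   # inverse class frequency
sample_weight = weight[y_demo]

clf = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=0)
cclf = CalibratedClassifierCV(base_estimator=clf, method='isotonic', cv=3)
cclf.fit(X_demo, y_demo, sample_weight=sample_weight)
proba = cclf.predict_proba(X_demo)[:, 1]
print(proba[:5])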
jreback/pandas | pandas/io/formats/html.py | 2 | 23192 | """
Module for formatting output data in HTML.
"""
from textwrap import dedent
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union, cast
from pandas._config import get_option
from pandas._libs import lib
from pandas import MultiIndex, option_context
from pandas.io.common import is_url
from pandas.io.formats.format import DataFrameFormatter, get_level_lengths
from pandas.io.formats.printing import pprint_thing
class HTMLFormatter:
"""
Internal class for formatting output data in html.
This class is intended for shared functionality between
DataFrame.to_html() and DataFrame._repr_html_().
Any logic in common with other output formatting methods
should ideally be inherited from classes in format.py
and this class responsible for only producing html markup.
"""
indent_delta = 2
def __init__(
self,
formatter: DataFrameFormatter,
classes: Optional[Union[str, List[str], Tuple[str, ...]]] = None,
border: Optional[int] = None,
table_id: Optional[str] = None,
render_links: bool = False,
) -> None:
self.fmt = formatter
self.classes = classes
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements: List[str] = []
self.bold_rows = self.fmt.bold_rows
self.escape = self.fmt.escape
self.show_dimensions = self.fmt.show_dimensions
if border is None:
border = cast(int, get_option("display.html.border"))
self.border = border
self.table_id = table_id
self.render_links = render_links
self.col_space = {
column: f"{value}px" if isinstance(value, int) else value
for column, value in self.fmt.col_space.items()
}
def to_string(self) -> str:
lines = self.render()
if any(isinstance(x, str) for x in lines):
lines = [str(x) for x in lines]
return "\n".join(lines)
def render(self) -> List[str]:
self._write_table()
if self.should_show_dimensions:
by = chr(215) # ×
self.write(
f"<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>"
)
return self.elements
@property
def should_show_dimensions(self):
return self.fmt.should_show_dimensions
@property
def show_row_idx_names(self) -> bool:
return self.fmt.show_row_idx_names
@property
def show_col_idx_names(self) -> bool:
return self.fmt.show_col_idx_names
@property
def row_levels(self) -> int:
if self.fmt.index:
# showing (row) index
return self.frame.index.nlevels
elif self.show_col_idx_names:
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# If the row index is not displayed a column of
# blank cells need to be included before the DataFrame values.
return 1
# not showing (row) index
return 0
def _get_columns_formatted_values(self) -> Iterable:
return self.columns
@property
def is_truncated(self) -> bool:
return self.fmt.is_truncated
@property
def ncols(self) -> int:
return len(self.fmt.tr_frame.columns)
def write(self, s: Any, indent: int = 0) -> None:
rs = pprint_thing(s)
self.elements.append(" " * indent + rs)
def write_th(
self, s: Any, header: bool = False, indent: int = 0, tags: Optional[str] = None
) -> None:
"""
Method for writing a formatted <th> cell.
If col_space is set on the formatter then that is used for
the value of min-width.
Parameters
----------
s : object
The data to be written inside the cell.
header : bool, default False
Set to True if the <th> is for use inside <thead>. This will
cause min-width to be set if there is one.
indent : int, default 0
The indentation level of the cell.
tags : str, default None
Tags to include in the cell.
Returns
-------
A written <th> cell.
"""
col_space = self.col_space.get(s, None)
if header and col_space is not None:
tags = tags or ""
tags += f'style="min-width: {col_space};"'
self._write_cell(s, kind="th", indent=indent, tags=tags)
def write_td(self, s: Any, indent: int = 0, tags: Optional[str] = None) -> None:
self._write_cell(s, kind="td", indent=indent, tags=tags)
def _write_cell(
self, s: Any, kind: str = "td", indent: int = 0, tags: Optional[str] = None
) -> None:
if tags is not None:
start_tag = f"<{kind} {tags}>"
else:
start_tag = f"<{kind}>"
if self.escape:
# escape & first to prevent double escaping of &
esc = {"&": r"&", "<": r"<", ">": r">"}
else:
esc = {}
rs = pprint_thing(s, escape_chars=esc).strip()
if self.render_links and is_url(rs):
rs_unescaped = pprint_thing(s, escape_chars={}).strip()
start_tag += f'<a href="{rs_unescaped}" target="_blank">'
end_a = "</a>"
else:
end_a = ""
self.write(f"{start_tag}{rs}{end_a}</{kind}>", indent)
def write_tr(
self,
line: Iterable,
indent: int = 0,
indent_delta: int = 0,
header: bool = False,
align: Optional[str] = None,
tags: Optional[Dict[int, str]] = None,
nindex_levels: int = 0,
) -> None:
if tags is None:
tags = {}
if align is None:
self.write("<tr>", indent)
else:
self.write(f'<tr style="text-align: {align};">', indent)
indent += indent_delta
for i, s in enumerate(line):
val_tag = tags.get(i, None)
if header or (self.bold_rows and i < nindex_levels):
self.write_th(s, indent=indent, header=header, tags=val_tag)
else:
self.write_td(s, indent, tags=val_tag)
indent -= indent_delta
self.write("</tr>", indent)
def _write_table(self, indent: int = 0) -> None:
_classes = ["dataframe"] # Default class.
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
_classes.append("tex2jax_ignore")
if self.classes is not None:
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise TypeError(
"classes must be a string, list, "
f"or tuple, not {type(self.classes)}"
)
_classes.extend(self.classes)
if self.table_id is None:
id_section = ""
else:
id_section = f' id="{self.table_id}"'
self.write(
f'<table border="{self.border}" class="{" ".join(_classes)}"{id_section}>',
indent,
)
if self.fmt.header or self.show_row_idx_names:
self._write_header(indent + self.indent_delta)
self._write_body(indent + self.indent_delta)
self.write("</table>", indent)
def _write_col_header(self, indent: int) -> None:
is_truncated_horizontally = self.fmt.is_truncated_horizontally
if isinstance(self.columns, MultiIndex):
template = 'colspan="{span:d}" halign="left"'
if self.fmt.sparsify:
# GH3547
sentinel = lib.no_default
else:
sentinel = False
levels = self.columns.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(zip(level_lengths, levels)):
if is_truncated_horizontally:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = (
values[:ins_col] + ("...",) + values[ins_col:]
)
else:
# sparse col headers do not receive a ...
values = (
values[:ins_col]
+ (values[ins_col - 1],)
+ values[ins_col:]
)
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers
# get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = values[:ins_col] + ("...",) + values[ins_col:]
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = values[:ins_col] + ["..."] + values[ins_col:]
# see gh-22579
# Column Offset Bug with to_html(index=False) with
# MultiIndex Columns and Index.
# Initially fill row with blank cells before column names.
# TODO: Refactor to remove code duplication with code
# block below for standard columns index.
row = [""] * (self.row_levels - 1)
if self.fmt.index or self.show_col_idx_names:
# see gh-22747
# If to_html(index_names=False) do not show columns
# index names.
# TODO: Refactor to use _get_column_name_list from
# DataFrameFormatter class and create a
# _get_formatted_column_labels function for code
# parity with DataFrameFormatter class.
if self.fmt.show_index_names:
name = self.columns.names[lnum]
row.append(pprint_thing(name or ""))
else:
row.append("")
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags, header=True)
else:
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# Initially fill row with blank cells before column names.
# TODO: Refactor to remove code duplication with code block
# above for columns MultiIndex.
row = [""] * (self.row_levels - 1)
if self.fmt.index or self.show_col_idx_names:
# see gh-22747
# If to_html(index_names=False) do not show columns
# index names.
# TODO: Refactor to use _get_column_name_list from
# DataFrameFormatter class.
if self.fmt.show_index_names:
row.append(self.columns.name or "")
else:
row.append("")
row.extend(self._get_columns_formatted_values())
align = self.fmt.justify
if is_truncated_horizontally:
ins_col = self.row_levels + self.fmt.tr_col_num
row.insert(ins_col, "...")
self.write_tr(row, indent, self.indent_delta, header=True, align=align)
def _write_row_header(self, indent: int) -> None:
is_truncated_horizontally = self.fmt.is_truncated_horizontally
row = [x if x is not None else "" for x in self.frame.index.names] + [""] * (
self.ncols + (1 if is_truncated_horizontally else 0)
)
self.write_tr(row, indent, self.indent_delta, header=True)
def _write_header(self, indent: int) -> None:
self.write("<thead>", indent)
if self.fmt.header:
self._write_col_header(indent + self.indent_delta)
if self.show_row_idx_names:
self._write_row_header(indent + self.indent_delta)
self.write("</thead>", indent)
def _get_formatted_values(self) -> Dict[int, List[str]]:
with option_context("display.max_colwidth", None):
fmt_values = {i: self.fmt.format_col(i) for i in range(self.ncols)}
return fmt_values
def _write_body(self, indent: int) -> None:
self.write("<tbody>", indent)
fmt_values = self._get_formatted_values()
# write values
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent + self.indent_delta)
else:
self._write_regular_rows(fmt_values, indent + self.indent_delta)
self.write("</tbody>", indent)
def _write_regular_rows(
self, fmt_values: Mapping[int, List[str]], indent: int
) -> None:
is_truncated_horizontally = self.fmt.is_truncated_horizontally
is_truncated_vertically = self.fmt.is_truncated_vertically
nrows = len(self.fmt.tr_frame)
if self.fmt.index:
fmt = self.fmt._get_formatter("__index__")
if fmt is not None:
index_values = self.fmt.tr_frame.index.map(fmt)
else:
index_values = self.fmt.tr_frame.index.format()
row: List[str] = []
for i in range(nrows):
if is_truncated_vertically and i == (self.fmt.tr_row_num):
str_sep_row = ["..."] * len(row)
self.write_tr(
str_sep_row,
indent,
self.indent_delta,
tags=None,
nindex_levels=self.row_levels,
)
row = []
if self.fmt.index:
row.append(index_values[i])
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# Add blank cell before data cells.
elif self.show_col_idx_names:
row.append("")
row.extend(fmt_values[j][i] for j in range(self.ncols))
if is_truncated_horizontally:
dot_col_ix = self.fmt.tr_col_num + self.row_levels
row.insert(dot_col_ix, "...")
self.write_tr(
row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels
)
def _write_hierarchical_rows(
self, fmt_values: Mapping[int, List[str]], indent: int
) -> None:
template = 'rowspan="{span}" valign="top"'
is_truncated_horizontally = self.fmt.is_truncated_horizontally
is_truncated_vertically = self.fmt.is_truncated_vertically
frame = self.fmt.tr_frame
nrows = len(frame)
assert isinstance(frame.index, MultiIndex)
idx_values = frame.index.format(sparsify=False, adjoin=False, names=False)
idx_values = list(zip(*idx_values))
if self.fmt.sparsify:
# GH3547
sentinel = lib.no_default
levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if is_truncated_vertically:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
inserted = False
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
# GH 14882 - Make sure insertion done once
if not inserted:
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = "..."
idx_values.insert(ins_row, tuple(dot_row))
inserted = True
else:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = "..."
idx_values[ins_row] = tuple(dot_row)
else:
rec_new[tag] = span
# If ins_row lies between tags, all cols idx cols
# receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(
ins_row, tuple(["..."] * len(level_lengths))
)
# GH 14882 - Place ... in correct level
elif inserted:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = "..."
idx_values[ins_row] = tuple(dot_row)
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
fmt_values[ix_col].insert(ins_row, "...")
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(self.ncols))
if is_truncated_horizontally:
row.insert(
self.row_levels - sparse_offset + self.fmt.tr_col_num, "..."
)
self.write_tr(
row,
indent,
self.indent_delta,
tags=tags,
nindex_levels=len(levels) - sparse_offset,
)
else:
row = []
for i in range(len(frame)):
if is_truncated_vertically and i == (self.fmt.tr_row_num):
str_sep_row = ["..."] * len(row)
self.write_tr(
str_sep_row,
indent,
self.indent_delta,
tags=None,
nindex_levels=self.row_levels,
)
idx_values = list(
zip(*frame.index.format(sparsify=False, adjoin=False, names=False))
)
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(self.ncols))
if is_truncated_horizontally:
row.insert(self.row_levels + self.fmt.tr_col_num, "...")
self.write_tr(
row,
indent,
self.indent_delta,
tags=None,
nindex_levels=frame.index.nlevels,
)
class NotebookFormatter(HTMLFormatter):
"""
Internal class for formatting output data in html for display in Jupyter
Notebooks. This class is intended for functionality specific to
DataFrame._repr_html_() and DataFrame.to_html(notebook=True)
"""
def _get_formatted_values(self) -> Dict[int, List[str]]:
return {i: self.fmt.format_col(i) for i in range(self.ncols)}
def _get_columns_formatted_values(self) -> List[str]:
return self.columns.format()
def write_style(self) -> None:
# We use the "scoped" attribute here so that the desired
# style properties for the data frame are not then applied
# throughout the entire notebook.
template_first = """\
<style scoped>"""
template_last = """\
</style>"""
template_select = """\
.dataframe %s {
%s: %s;
}"""
element_props = [
("tbody tr th:only-of-type", "vertical-align", "middle"),
("tbody tr th", "vertical-align", "top"),
]
if isinstance(self.columns, MultiIndex):
element_props.append(("thead tr th", "text-align", "left"))
if self.show_row_idx_names:
element_props.append(
("thead tr:last-of-type th", "text-align", "right")
)
else:
element_props.append(("thead th", "text-align", "right"))
template_mid = "\n\n".join(map(lambda t: template_select % t, element_props))
template = dedent("\n".join((template_first, template_mid, template_last)))
self.write(template)
def render(self) -> List[str]:
self.write("<div>")
self.write_style()
super().render()
self.write("</div>")
return self.elements
| bsd-3-clause |
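A hedged sketch of how the formatter above is normally reached through the public DataFrame.to_html API; the frame contents are illustrative.

import pandas as pd

df = pd.DataFrame({"name": ["pandas", "example"],
                   "url": ["https://pandas.pydata.org", "https://example.com"]})
html = df.to_html(classes="table", table_id="demo", render_links=True, border=1)
print(html.splitlines()[0])   # opening <table> tag carries border, classes and id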
modelkayak/python_signal_examples | energy_fft.py | 1 | 2685 | import numpy as np
import scipy
from matplotlib import pyplot as plt
from numpy import pi as pi
# Plotting logic switches
time_plot = True
freq_plot = True
# Oversample to make things look purty
oversample = 100
# Frequencies to simulate
f_min = 5 #[Hz]
f_max = 10 #[Hz]
f_list = np.arange(f_min,f_max) # Note: arange does not include the stop pt
# Time array
t_start = 0 #[s]
t_stop = oversample/f_min #[s]
f_samp = oversample*f_max #[Hz]
t_step = 1/f_samp #[s]
# Create a time span, but do not care about the number of points.
# This will likely create sinc functions in the FFT.
#t = np.arange(t_start,t_stop,t_step)
# Use N points to make a faster FFT and to avoid
# the addition of zeros at the end of the FFT array.
# The addition of zeros will result in the multiplication
# of a box filter in the time domain, which results in
# a sinc function in the frequency domain
N = int(np.power(2,np.ceil(np.log2(t_stop/t_step))))
# Create a time span, but care about the number of points such that
# the signal does not look like a sinc function in the freq. domain.
# Source: U of RI ECE, ELE 436: Comm. Sys., FFT Tutorial
t = np.linspace(t_start,t_stop,num=N,endpoint=True)
# Create random amplitudes
a_list = [np.random.randint(1,10) for i in f_list]
# Create a time signal with random amplitudes for each frequency
x = 0
for a,f in zip(a_list,f_list):
x += a*np.sin(2*pi*f*t)
# Take the FFT of the signal
# Normalize by the size of x due to how a DTFT is taken
# Take the absolute value because we only care about the magnitude
# spectrum of the signal.
X = np.abs(np.fft.fft(x)/x.size)
# Get the labels for the frequencies, num pts and delta between them
freq_labels = np.fft.fftfreq(N,t[1]-t[0])
# Plot the time signal
if time_plot and not freq_plot:
plt.figure('Time Domain View')
plt.title("Time domain view of signal x")
plt.plot(t,x)
plt.xlim([0,5/f_min])
plt.xlabel("Time [s]")
plt.ylabel("Amplitude")
plt.show()
# Or plot the frequency
if freq_plot and not time_plot:
plt.figure('Frequency Domain View')
plt.title("Frequency domain view of signal x")
plt.plot(freq_labels,X)
plt.xlim([-f_max,f_max])
plt.show()
# Or plot both
if freq_plot and time_plot:
plt.subplot(211)
plt.title("Time and frequency domain view of real signal x")
plt.plot(t,x)
plt.xlim([0,5/f_min]) # Limit the time shown to a small amount
plt.xlabel("Time [s]")
plt.ylabel("Amplitude")
plt.subplot(212)
plt.plot(freq_labels,X)
plt.xlim([-f_max,f_max]) # Limit the freq shown to a small amount
plt.xlabel("Frequency [Hz]")
plt.ylabel("Magnitude (linear)")
plt.show()
| mit |
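A hedged companion check for the normalization comments above: for a real sinusoid of amplitude A that falls exactly on an FFT bin, the normalized two-sided magnitude peaks at A/2, so doubling the peak recovers the amplitude.

import numpy as np

fs = 1000.0                          # sampling rate [Hz], illustrative
t = np.arange(0, 1.0, 1.0 / fs)      # one second of samples
A, f0 = 3.0, 50.0                    # amplitude and frequency of the test tone
x = A * np.sin(2 * np.pi * f0 * t)
X = np.abs(np.fft.fft(x)) / x.size   # same normalization as above
k = int(f0 * t.size / fs)            # FFT bin index of f0
print(2 * X[k])                      # approximately A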
FrankTsui/robust_rescaled_svm | common.py | 1 | 1636 | import numpy as np
import matplotlib.pyplot as plt
def plot_decision_function(classifier, fea, gnd, title):
'''
plot the decision function in 2-d plane
    classifier: the svm model
fea: array like, shape = (smp_num, fea_num)
gnd: array like, shape = (smp_num,)
title: title of plot
'''
fea_min = fea.min(axis = 0)
fea_max = fea.max(axis = 0)
mesh_num = 100
# meshgrid
xx, yy = np.meshgrid(np.linspace(fea_min[0], fea_max[0], mesh_num), \
np.linspace(fea_min[1], fea_max[1], mesh_num))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()], last_model_flag = False)
Z_first = Z[:, 0].copy()
Z_last = Z[:, -1].copy()
Z_first = Z_first.reshape(xx.shape)
Z_last = Z_last.reshape(xx.shape)
del Z
# plot the line, the points
leg_svm = plt.contour(xx, yy, Z_first, levels = [0.0], colors = 'k')
leg_rsvm = plt.contour(xx, yy, Z_last, levels = [0.0], colors = 'r')
posi_index = gnd == 1
nega_index = gnd == -1
marker_size = 70
plt.scatter(fea[:, 0], fea[:, 1], marker = 'o', \
s = classifier.smp_weights_mat[:, -1] * marker_size * 4, c = 'w', alpha = 1.0, edgecolors = 'm', label = 'weights')
plt.scatter(fea[posi_index, 0], fea[posi_index, 1], marker = '^', s = marker_size, c = 'g', alpha = 0.8, label = 'posi')
plt.scatter(fea[nega_index, 0], fea[nega_index, 1], marker = 'x', s = marker_size, c = 'b', label = 'nega')
leg_svm.collections[0].set_label('svm')
leg_rsvm.collections[0].set_label('rsvm')
plt.legend(loc = 'upper left')
plt.axis('on')
plt.title(title) | apache-2.0 |
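A hedged sketch of the same meshgrid-plus-contour pattern with a plain scikit-learn SVC, since the decision_function signature used above (with last_model_flag) is specific to this project's classifier; the synthetic blobs are illustrative.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC

rng = np.random.RandomState(0)
fea = np.r_[rng.randn(30, 2) + 2, rng.randn(30, 2) - 2]
gnd = np.r_[np.ones(30), -np.ones(30)]

clf = SVC(kernel='linear').fit(fea, gnd)
xx, yy = np.meshgrid(np.linspace(fea[:, 0].min(), fea[:, 0].max(), 100),
                     np.linspace(fea[:, 1].min(), fea[:, 1].max(), 100))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.contour(xx, yy, Z, levels=[0.0], colors='k')
plt.scatter(fea[:, 0], fea[:, 1], c=gnd)
plt.show()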
JeanKossaifi/scikit-learn | sklearn/tree/tests/test_tree.py | 48 | 47506 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import ignore_warnings
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
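# Give every dataset a CSC copy as well, so the sparse code paths below can be
# checked against the results obtained on the dense representation.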
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
    # The gini index and the mean squared error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth here.
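    # (For 0/1 class labels the Gini impurity of a node is 2 * p * (1 - p)
    # while the MSE/variance criterion gives p * (1 - p); the two are
    # proportional, so both criteria rank candidate splits identically.)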
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, dataset, sparse=False):
    """Test that leaves contain at least min_weight_fraction_leaf of the
    total weight of the training set"""
    if sparse:
        X = DATASETS[dataset]["X_sparse"].astype(np.float32)
    else:
        X = DATASETS[dataset]["X"].astype(np.float32)
    y = DATASETS[dataset]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees constrained to max_leaf_nodes = k + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Test that an informative error (mentioning float32) is raised when the
    # input values are too large to be represented as float32.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test,
    # we limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Check various criteria
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # n_samples is set equal to n_features to ease the simultaneous
    # construction of a CSR and a CSC matrix from the same arrays
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
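    # Note: built from the same (data, indices, indptr) arrays, the CSR matrix
    # is the transpose of the CSC matrix above, so X_test is a distinct test
    # matrix with the same sparsity level (and the same explicit zeros).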
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
| bsd-3-clause |
srinathv/vispy | vispy/visuals/isocurve.py | 18 | 7809 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .line import LineVisual
from ..color import ColorArray
from ..color.colormap import _normalize, get_colormap
from ..geometry.isocurve import isocurve
from ..testing import has_matplotlib
# checking for matplotlib
_HAS_MPL = has_matplotlib()
if _HAS_MPL:
from matplotlib import _cntr as cntr
class IsocurveVisual(LineVisual):
"""Displays an isocurve of a 2D scalar array.
Parameters
----------
data : ndarray | None
2D scalar array.
levels : ndarray, shape (Nlev,) | None
The levels at which the isocurve is constructed from "*data*".
color_lev : Color, colormap name, tuple, list or array
The color to use when drawing the line. If a list is given, it
        must be of shape (Nlev,); if an array is given, it must be of
        shape (Nlev, ...) and provide one color per level (RGBA or color name).
clim : tuple
(min, max) limits to apply when mapping level values through a
colormap.
**kwargs : dict
Keyword arguments to pass to `LineVisual`.
Notes
-----
"""
def __init__(self, data=None, levels=None, color_lev=None, clim=None,
**kwargs):
self._data = None
self._levels = levels
self._color_lev = color_lev
self._clim = clim
self._need_color_update = True
self._need_level_update = True
self._need_recompute = True
self._X = None
self._Y = None
self._iso = None
self._level_min = None
self._data_is_uniform = False
self._lc = None
self._cl = None
self._li = None
self._connect = None
self._verts = None
kwargs['method'] = 'gl'
kwargs['antialias'] = False
LineVisual.__init__(self, **kwargs)
if data is not None:
self.set_data(data)
@property
def levels(self):
""" The threshold at which the isocurve is constructed from the
2D data.
"""
return self._levels
@levels.setter
def levels(self, levels):
self._levels = levels
self._need_level_update = True
self._need_recompute = True
self.update()
@property
def color(self):
return self._color_lev
@color.setter
def color(self, color):
self._color_lev = color
self._need_level_update = True
self._need_color_update = True
self.update()
def set_data(self, data):
""" Set the scalar array data
Parameters
----------
data : ndarray
A 2D array of scalar values. The isocurve is constructed to show
all locations in the scalar field equal to ``self.levels``.
"""
self._data = data
# if using matplotlib isoline algorithm we have to check for meshgrid
# and we can setup the tracer object here
if _HAS_MPL:
if self._X is None or self._X.T.shape != data.shape:
self._X, self._Y = np.meshgrid(np.arange(data.shape[0]),
np.arange(data.shape[1]))
self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
if self._clim is None:
self._clim = (data.min(), data.max())
# sanity check,
# should we raise an error here, since no isolines can be drawn?
# for now, _prepare_draw returns False if no isoline can be drawn
if self._data.min() != self._data.max():
self._data_is_uniform = False
else:
self._data_is_uniform = True
self._need_recompute = True
self.update()
def _get_verts_and_connect(self, paths):
""" retrieve vertices and connects from given paths-list
"""
verts = np.vstack(paths)
gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
connect = np.ones(gaps[-1], dtype=bool)
connect[gaps[:-1]] = False
return verts, connect
def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
"""
level_index = []
connects = []
verts = []
        # calculate which levels are within the data range
# this works for now and the existing examples, but should be tested
# thoroughly also with the data-sanity check in set_data-function
choice = np.nonzero((self.levels > self._data.min()) &
(self._levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
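        # NOTE: this assumes at least one level lies strictly inside the data
        # range; the uniform-data case is already filtered out in set_data().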
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
self._verts = np.vstack(verts)
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding color
"""
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) +
colors[i+self._level_min])
self._cl = np.vstack(level_color)
def _levels_to_colors(self):
# computes ColorArrays for given levels
        # try to interpret _color_lev as a colormap; otherwise fall back to
        # treating it as a (list of) color(s)
try:
f_color_levs = get_colormap(self._color_lev)
        except Exception:
colors = ColorArray(self._color_lev).rgba
else:
lev = _normalize(self._levels, self._clim[0], self._clim[1])
# map function expects (Nlev,1)!
colors = f_color_levs.map(lev[:, np.newaxis])
# broadcast to (nlev, 4) array
if len(colors) == 1:
colors = colors * np.ones((len(self._levels), 1))
# detect color_lev/levels mismatch and raise error
if (len(colors) != len(self._levels)):
raise TypeError("Color/level mismatch. Color must be of shape "
"(Nlev, ...) and provide one color per level")
self._lc = colors
def _prepare_draw(self, view):
if (self._data is None or self._levels is None or
self._color_lev is None or self._data_is_uniform):
return False
if self._need_level_update:
self._levels_to_colors()
self._need_level_update = False
if self._need_recompute:
self._compute_iso_line()
self._compute_iso_color()
LineVisual.set_data(self, pos=self._verts, connect=self._connect,
color=self._cl)
self._need_recompute = False
if self._need_color_update:
self._compute_iso_color()
LineVisual.set_data(self, color=self._cl)
self._need_color_update = False
return LineVisual._prepare_draw(self, view)
| bsd-3-clause |
eljost/pysisyphus | deprecated/tests/test_dynamics/test_dynamics.py | 1 | 2531 | from matplotlib.patches import Circle
import matplotlib.pyplot as plt
import numpy as np
import pytest
from pysisyphus.calculators.AnaPot import AnaPot
from pysisyphus.dynamics.velocity_verlet import md
def test_velocity_verlet():
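    # Velocity Verlet propagates x(t+dt) = x + v*dt + 0.5*a*dt**2 and
    # v(t+dt) = v + 0.5*(a(t) + a(t+dt))*dt, so trajectories computed with
    # smaller time steps should converge; the plot below visualizes this.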
geom = AnaPot.get_geom((0.52, 1.80, 0))
x0 = geom.coords.copy()
v0 = .1 * np.random.rand(*geom.coords.shape)
t = 3
dts = (.005, .01, .02, .04, .08)
all_xs = list()
for dt in dts:
geom.coords = x0.copy()
md_kwargs = {
"v0": v0.copy(),
"t": t,
"dt": dt,
}
md_result = md(geom, **md_kwargs)
all_xs.append(md_result.coords)
calc = geom.calculator
calc.plot()
ax = calc.ax
for dt, xs in zip(dts, all_xs):
ax.plot(*xs.T[:2], "o-", label=f"dt={dt:.3f}")
# ax.plot(*xs.T[:2], "-", label=f"dt={dt:.3f}")
ax.legend()
plt.show()
def ase_md_playground():
geom = AnaPot.get_geom((0.52, 1.80, 0), atoms=("H", ))
atoms = geom.as_ase_atoms()
# ase_calc = FakeASE(geom.calculator)
# from ase.optimize import BFGS
# dyn = BFGS(atoms)
# dyn.run(fmax=0.05)
import ase
from ase import units
from ase.io.trajectory import Trajectory
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.md.verlet import VelocityVerlet
MaxwellBoltzmannDistribution(atoms, 300 * units.kB)
momenta = atoms.get_momenta()
    # Zero the third (z) component so the dynamics stay in the xy plane
    momenta[0, 2] = 0.
    atoms.set_momenta(momenta)
    dyn = VelocityVerlet(atoms, .005 * units.fs)  # 0.005 fs time step.
def printenergy(a):
"""Function to print the potential, kinetic and total energy"""
epot = a.get_potential_energy() / len(a)
ekin = a.get_kinetic_energy() / len(a)
print('Energy per atom: Epot = %.3feV Ekin = %.3feV (T=%3.0fK) '
'Etot = %.3feV' % (epot, ekin, ekin / (1.5 * units.kB), epot + ekin))
# Now run the dynamics
printenergy(atoms)
traj_fn = 'asemd.traj'
traj = Trajectory(traj_fn, 'w', atoms)
dyn.attach(traj.write, interval=5)
# dyn.attach(bumms().bimms, interval=1)
dyn.run(10000)
printenergy(atoms)
traj.close()
traj = ase.io.read(traj_fn+"@:")#, "r")
pos = [a.get_positions() for a in traj]
from pysisyphus.constants import BOHR2ANG
pos = np.array(pos) / BOHR2ANG
calc = geom.calculator
calc.plot()
ax = calc.ax
ax.plot(*pos[:,0,:2].T)
plt.show()
if __name__ == "__main__":
ase_md_playground()
| gpl-3.0 |
AtsushiSakai/PythonRobotics | PathPlanning/Eta3SplinePath/eta3_spline_path.py | 1 | 13649 | """
eta^3 polynomials planner
author: Joe Dinius, Ph.D (https://jwdinius.github.io)
Atsushi Sakai (@Atsushi_twi)
Ref:
- [eta^3-Splines for the Smooth Path Generation of Wheeled Mobile Robots]
(https://ieeexplore.ieee.org/document/4339545/)
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
# NOTE: *_pose is a 3-array:
# 0 - x coord, 1 - y coord, 2 - orientation angle \theta
show_animation = True
class Eta3Path(object):
"""
Eta3Path
input
segments: a list of `Eta3PathSegment` instances
defining a continuous path
"""
def __init__(self, segments):
# ensure input has the correct form
assert(isinstance(segments, list) and isinstance(
segments[0], Eta3PathSegment))
# ensure that each segment begins from the previous segment's end (continuity)
for r, s in zip(segments[:-1], segments[1:]):
assert(np.array_equal(r.end_pose, s.start_pose))
self.segments = segments
def calc_path_point(self, u):
"""
Eta3Path::calc_path_point
input
normalized interpolation point along path object, 0 <= u <= len(self.segments)
returns
2d (x,y) position vector
"""
assert(0 <= u <= len(self.segments))
if np.isclose(u, len(self.segments)):
segment_idx = len(self.segments) - 1
u = 1.
else:
segment_idx = int(np.floor(u))
u -= segment_idx
return self.segments[segment_idx].calc_point(u)
class Eta3PathSegment(object):
"""
Eta3PathSegment - constructs an eta^3 path segment based on desired
shaping, eta, and curvature vector, kappa. If either, or both,
of eta and kappa are not set during initialization,
they will default to zeros.
input
start_pose - starting pose array (x, y, \theta)
end_pose - ending pose array (x, y, \theta)
eta - shaping parameters, default=None
kappa - curvature parameters, default=None
"""
def __init__(self, start_pose, end_pose, eta=None, kappa=None):
# make sure inputs are of the correct size
assert(len(start_pose) == 3 and len(start_pose) == len(end_pose))
self.start_pose = start_pose
self.end_pose = end_pose
# if no eta is passed, initialize it to array of zeros
if not eta:
eta = np.zeros((6,))
else:
# make sure that eta has correct size
assert(len(eta) == 6)
# if no kappa is passed, initialize to array of zeros
if not kappa:
kappa = np.zeros((4,))
else:
assert(len(kappa) == 4)
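        # Rough interpretation (cf. the referenced paper): eta[0]/eta[1] scale
        # the tangent magnitudes at the start/end pose, eta[2]..eta[5] shape
        # the higher-order (acceleration/jerk-like) terms, and kappa holds the
        # curvature and its derivative at both endpoints.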
# set up angle cosines and sines for simpler computations below
ca = np.cos(start_pose[2])
sa = np.sin(start_pose[2])
cb = np.cos(end_pose[2])
sb = np.sin(end_pose[2])
# 2 dimensions (x,y) x 8 coefficients per dimension
self.coeffs = np.empty((2, 8))
# constant terms (u^0)
self.coeffs[0, 0] = start_pose[0]
self.coeffs[1, 0] = start_pose[1]
# linear (u^1)
self.coeffs[0, 1] = eta[0] * ca
self.coeffs[1, 1] = eta[0] * sa
# quadratic (u^2)
self.coeffs[0, 2] = 1. / 2 * eta[2] * \
ca - 1. / 2 * eta[0]**2 * kappa[0] * sa
self.coeffs[1, 2] = 1. / 2 * eta[2] * \
sa + 1. / 2 * eta[0]**2 * kappa[0] * ca
# cubic (u^3)
self.coeffs[0, 3] = 1. / 6 * eta[4] * ca - 1. / 6 * \
(eta[0]**3 * kappa[1] + 3. * eta[0] * eta[2] * kappa[0]) * sa
self.coeffs[1, 3] = 1. / 6 * eta[4] * sa + 1. / 6 * \
(eta[0]**3 * kappa[1] + 3. * eta[0] * eta[2] * kappa[0]) * ca
# quartic (u^4)
tmp1 = 35. * (end_pose[0] - start_pose[0])
tmp2 = (20. * eta[0] + 5 * eta[2] + 2. / 3 * eta[4]) * ca
tmp3 = (5. * eta[0] ** 2 * kappa[0] + 2. / 3 * eta[0] ** 3 * kappa[1]
+ 2. * eta[0] * eta[2] * kappa[0]) * sa
tmp4 = (15. * eta[1] - 5. / 2 * eta[3] + 1. / 6 * eta[5]) * cb
tmp5 = (5. / 2 * eta[1] ** 2 * kappa[2] - 1. / 6 * eta[1] ** 3 *
kappa[3] - 1. / 2 * eta[1] * eta[3] * kappa[2]) * sb
self.coeffs[0, 4] = tmp1 - tmp2 + tmp3 - tmp4 - tmp5
tmp1 = 35. * (end_pose[1] - start_pose[1])
tmp2 = (20. * eta[0] + 5. * eta[2] + 2. / 3 * eta[4]) * sa
tmp3 = (5. * eta[0] ** 2 * kappa[0] + 2. / 3 * eta[0] ** 3 * kappa[1]
+ 2. * eta[0] * eta[2] * kappa[0]) * ca
tmp4 = (15. * eta[1] - 5. / 2 * eta[3] + 1. / 6 * eta[5]) * sb
tmp5 = (5. / 2 * eta[1] ** 2 * kappa[2] - 1. / 6 * eta[1] ** 3 *
kappa[3] - 1. / 2 * eta[1] * eta[3] * kappa[2]) * cb
self.coeffs[1, 4] = tmp1 - tmp2 - tmp3 - tmp4 + tmp5
# quintic (u^5)
tmp1 = -84. * (end_pose[0] - start_pose[0])
tmp2 = (45. * eta[0] + 10. * eta[2] + eta[4]) * ca
tmp3 = (10. * eta[0] ** 2 * kappa[0] + eta[0] ** 3 * kappa[1] + 3. *
eta[0] * eta[2] * kappa[0]) * sa
tmp4 = (39. * eta[1] - 7. * eta[3] + 1. / 2 * eta[5]) * cb
tmp5 = + (7. * eta[1] ** 2 * kappa[2] - 1. / 2 * eta[1] ** 3 * kappa[3]
- 3. / 2 * eta[1] * eta[3] * kappa[2]) * sb
self.coeffs[0, 5] = tmp1 + tmp2 - tmp3 + tmp4 + tmp5
tmp1 = -84. * (end_pose[1] - start_pose[1])
tmp2 = (45. * eta[0] + 10. * eta[2] + eta[4]) * sa
tmp3 = (10. * eta[0] ** 2 * kappa[0] + eta[0] ** 3 * kappa[1] + 3. *
eta[0] * eta[2] * kappa[0]) * ca
tmp4 = (39. * eta[1] - 7. * eta[3] + 1. / 2 * eta[5]) * sb
tmp5 = - (7. * eta[1] ** 2 * kappa[2] - 1. / 2 * eta[1] ** 3 * kappa[3]
- 3. / 2 * eta[1] * eta[3] * kappa[2]) * cb
self.coeffs[1, 5] = tmp1 + tmp2 + tmp3 + tmp4 + tmp5
# sextic (u^6)
tmp1 = 70. * (end_pose[0] - start_pose[0])
tmp2 = (36. * eta[0] + 15. / 2 * eta[2] + 2. / 3 * eta[4]) * ca
tmp3 = + (15. / 2 * eta[0] ** 2 * kappa[0] + 2. / 3 * eta[0] ** 3 *
kappa[1] + 2. * eta[0] * eta[2] * kappa[0]) * sa
tmp4 = (34. * eta[1] - 13. / 2 * eta[3] + 1. / 2 * eta[5]) * cb
tmp5 = - (13. / 2 * eta[1] ** 2 * kappa[2] - 1. / 2 * eta[1] ** 3 *
kappa[3] - 3. / 2 * eta[1] * eta[3] * kappa[2]) * sb
self.coeffs[0, 6] = tmp1 - tmp2 + tmp3 - tmp4 + tmp5
tmp1 = 70. * (end_pose[1] - start_pose[1])
tmp2 = - (36. * eta[0] + 15. / 2 * eta[2] + 2. / 3 * eta[4]) * sa
tmp3 = - (15. / 2 * eta[0] ** 2 * kappa[0] + 2. / 3 * eta[0] ** 3 *
kappa[1] + 2. * eta[0] * eta[2] * kappa[0]) * ca
tmp4 = - (34. * eta[1] - 13. / 2 * eta[3] + 1. / 2 * eta[5]) * sb
tmp5 = + (13. / 2 * eta[1] ** 2 * kappa[2] - 1. / 2 * eta[1] ** 3 *
kappa[3] - 3. / 2 * eta[1] * eta[3] * kappa[2]) * cb
self.coeffs[1, 6] = tmp1 + tmp2 + tmp3 + tmp4 + tmp5
# septic (u^7)
tmp1 = -20. * (end_pose[0] - start_pose[0])
tmp2 = (10. * eta[0] + 2. * eta[2] + 1. / 6 * eta[4]) * ca
tmp3 = - (2. * eta[0] ** 2 * kappa[0] + 1. / 6 * eta[0] ** 3 * kappa[1]
+ 1. / 2 * eta[0] * eta[2] * kappa[0]) * sa
tmp4 = (10. * eta[1] - 2. * eta[3] + 1. / 6 * eta[5]) * cb
tmp5 = (2. * eta[1] ** 2 * kappa[2] - 1. / 6 * eta[1] ** 3 * kappa[3]
- 1. / 2 * eta[1] * eta[3] * kappa[2]) * sb
self.coeffs[0, 7] = tmp1 + tmp2 + tmp3 + tmp4 + tmp5
tmp1 = -20. * (end_pose[1] - start_pose[1])
tmp2 = (10. * eta[0] + 2. * eta[2] + 1. / 6 * eta[4]) * sa
tmp3 = (2. * eta[0] ** 2 * kappa[0] + 1. / 6 * eta[0] ** 3 * kappa[1]
+ 1. / 2 * eta[0] * eta[2] * kappa[0]) * ca
tmp4 = (10. * eta[1] - 2. * eta[3] + 1. / 6 * eta[5]) * sb
tmp5 = - (2. * eta[1] ** 2 * kappa[2] - 1. / 6 * eta[1] ** 3 * kappa[3]
- 1. / 2 * eta[1] * eta[3] * kappa[2]) * cb
self.coeffs[1, 7] = tmp1 + tmp2 + tmp3 + tmp4 + tmp5
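        # Arc-length helpers: s_dot(u) is the norm of the first derivative of
        # the position w.r.t. u (clamped away from zero), and f_length
        # integrates it numerically to give the length of the segment.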
self.s_dot = lambda u: max(np.linalg.norm(
self.coeffs[:, 1:].dot(np.array(
[1, 2. * u, 3. * u**2, 4. * u**3,
5. * u**4, 6. * u**5, 7. * u**6]))), 1e-6)
self.f_length = lambda ue: quad(lambda u: self.s_dot(u), 0, ue)
self.segment_length = self.f_length(1)[0]
def calc_point(self, u):
"""
Eta3PathSegment::calc_point
input
u - parametric representation of a point along the segment, 0 <= u <= 1
returns
(x,y) of point along the segment
"""
assert(0 <= u <= 1)
return self.coeffs.dot(np.array([1, u, u**2, u**3, u**4, u**5, u**6, u**7]))
def calc_deriv(self, u, order=1):
"""
Eta3PathSegment::calc_deriv
input
u - parametric representation of a point along the segment, 0 <= u <= 1
returns
(d^nx/du^n,d^ny/du^n) of point along the segment, for 0 < n <= 2
"""
assert(0 <= u <= 1)
assert(0 < order <= 2)
if order == 1:
return self.coeffs[:, 1:].dot(np.array([1, 2. * u, 3. * u**2, 4. * u**3, 5. * u**4, 6. * u**5, 7. * u**6]))
return self.coeffs[:, 2:].dot(np.array([2, 6. * u, 12. * u**2, 20. * u**3, 30. * u**4, 42. * u**5]))
def test1():
for i in range(10):
path_segments = []
# segment 1: lane-change curve
start_pose = [0, 0, 0]
end_pose = [4, 3.0, 0]
# NOTE: The ordering on kappa is [kappa_A, kappad_A, kappa_B, kappad_B], with kappad_* being the curvature derivative
kappa = [0, 0, 0, 0]
eta = [i, i, 0, 0, 0, 0]
path_segments.append(Eta3PathSegment(
start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa))
path = Eta3Path(path_segments)
# interpolate at several points along the path
ui = np.linspace(0, len(path_segments), 1001)
pos = np.empty((2, ui.size))
for j, u in enumerate(ui):
pos[:, j] = path.calc_path_point(u)
if show_animation:
# plot the path
plt.plot(pos[0, :], pos[1, :])
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plt.pause(1.0)
if show_animation:
plt.close("all")
def test2():
for i in range(10):
path_segments = []
# segment 1: lane-change curve
start_pose = [0, 0, 0]
end_pose = [4, 3.0, 0]
# NOTE: The ordering on kappa is [kappa_A, kappad_A, kappa_B, kappad_B], with kappad_* being the curvature derivative
kappa = [0, 0, 0, 0]
eta = [0, 0, (i - 5) * 20, (5 - i) * 20, 0, 0]
path_segments.append(Eta3PathSegment(
start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa))
path = Eta3Path(path_segments)
# interpolate at several points along the path
ui = np.linspace(0, len(path_segments), 1001)
pos = np.empty((2, ui.size))
for j, u in enumerate(ui):
pos[:, j] = path.calc_path_point(u)
if show_animation:
# plot the path
plt.plot(pos[0, :], pos[1, :])
plt.pause(1.0)
if show_animation:
plt.close("all")
def test3():
path_segments = []
# segment 1: lane-change curve
start_pose = [0, 0, 0]
end_pose = [4, 1.5, 0]
# NOTE: The ordering on kappa is [kappa_A, kappad_A, kappa_B, kappad_B], with kappad_* being the curvature derivative
kappa = [0, 0, 0, 0]
eta = [4.27, 4.27, 0, 0, 0, 0]
path_segments.append(Eta3PathSegment(
start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa))
# segment 2: line segment
start_pose = [4, 1.5, 0]
end_pose = [5.5, 1.5, 0]
kappa = [0, 0, 0, 0]
eta = [0, 0, 0, 0, 0, 0]
path_segments.append(Eta3PathSegment(
start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa))
# segment 3: cubic spiral
start_pose = [5.5, 1.5, 0]
end_pose = [7.4377, 1.8235, 0.6667]
kappa = [0, 0, 1, 1]
eta = [1.88, 1.88, 0, 0, 0, 0]
path_segments.append(Eta3PathSegment(
start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa))
# segment 4: generic twirl arc
start_pose = [7.4377, 1.8235, 0.6667]
end_pose = [7.8, 4.3, 1.8]
kappa = [1, 1, 0.5, 0]
eta = [7, 10, 10, -10, 4, 4]
path_segments.append(Eta3PathSegment(
start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa))
# segment 5: circular arc
start_pose = [7.8, 4.3, 1.8]
end_pose = [5.4581, 5.8064, 3.3416]
kappa = [0.5, 0, 0.5, 0]
eta = [2.98, 2.98, 0, 0, 0, 0]
path_segments.append(Eta3PathSegment(
start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa))
# construct the whole path
path = Eta3Path(path_segments)
# interpolate at several points along the path
ui = np.linspace(0, len(path_segments), 1001)
pos = np.empty((2, ui.size))
for i, u in enumerate(ui):
pos[:, i] = path.calc_path_point(u)
# plot the path
if show_animation:
plt.figure('Path from Reference')
plt.plot(pos[0, :], pos[1, :])
plt.xlabel('x')
plt.ylabel('y')
plt.title('Path')
plt.pause(1.0)
plt.show()
def main():
"""
recreate path from reference (see Table 1)
"""
test1()
test2()
test3()
if __name__ == '__main__':
main()
| mit |
kaczla/PJN | src/Przecinki/scikit.py | 1 | 1048 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import sys
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import cross_val_predict
from sklearn import linear_model
X = []
Y = []
for line in sys.stdin:
line = line.rstrip()
X.append([len(line.split())])
Y.append(line.count(","))
lr = linear_model.LinearRegression()
predicted = cross_val_predict(lr, X, Y)
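# (The cross-validated predictions above are computed for inspection only and
# are not used further.)
# Held-out evaluation data: the target is the comma count of each line, the
# single feature is the word count after the commas have been stripped.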
FILE = open(sys.argv[1], "r")
X_TEST = []
Y_TEST = []
for line in FILE:
line = line.rstrip()
Y_TEST.append(line.count(","))
line = line.replace(",", "")
X_TEST.append([len(line.split())])
regr = linear_model.LinearRegression()
regr.fit(X, Y)
print "Coefficients: ", regr.coef_
print "Residual sum of squares: %.2f" % np.mean((regr.predict(X_TEST) - Y_TEST) ** 2)
print "Variance score: %.2f" % regr.score(X_TEST, Y_TEST)
plt.scatter(X_TEST, Y_TEST, color='black')
plt.plot(X_TEST, regr.predict(X_TEST), color='green', linewidth=2)
plt.xticks(())
plt.yticks(())
plt.show()
| gpl-2.0 |
danforthcenter/plantcv | plantcv/plantcv/photosynthesis/analyze_fvfm.py | 2 | 5529 | # Fluorescence Analysis
import os
import cv2
import numpy as np
import pandas as pd
from plotnine import ggplot, geom_label, aes, geom_line
from plantcv.plantcv import print_image
from plantcv.plantcv import plot_image
from plantcv.plantcv import fatal_error
from plantcv.plantcv import params
from plantcv.plantcv import outputs
def analyze_fvfm(fdark, fmin, fmax, mask, bins=256, label="default"):
"""Analyze PSII camera images.
Inputs:
fdark = grayscale fdark image
fmin = grayscale fmin image
fmax = grayscale fmax image
mask = mask of plant (binary, single channel)
bins = number of bins (1 to 256 for 8-bit; 1 to 65,536 for 16-bit; default is 256)
label = optional label parameter, modifies the variable name of observations recorded
Returns:
analysis_images = list of images (fv image and fvfm histogram image)
:param fdark: numpy.ndarray
:param fmin: numpy.ndarray
:param fmax: numpy.ndarray
:param mask: numpy.ndarray
:param bins: int
:param label: str
:return analysis_images: numpy.ndarray
"""
# Auto-increment the device counter
params.device += 1
# Check that fdark, fmin, and fmax are grayscale (single channel)
if not all(len(np.shape(i)) == 2 for i in [fdark, fmin, fmax]):
fatal_error("The fdark, fmin, and fmax images must be grayscale images.")
# QC Fdark Image
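    # A bright Fdark frame (max > 2000 counts within the plant mask) suggests
    # the dark measurement was contaminated by light; flag it for QC below.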
fdark_mask = cv2.bitwise_and(fdark, fdark, mask=mask)
if np.amax(fdark_mask) > 2000:
qc_fdark = False
else:
qc_fdark = True
# Mask Fmin and Fmax Image
fmin_mask = cv2.bitwise_and(fmin, fmin, mask=mask)
fmax_mask = cv2.bitwise_and(fmax, fmax, mask=mask)
# Calculate Fvariable, where Fv = Fmax - Fmin (masked)
fv = np.subtract(fmax_mask, fmin_mask)
# When Fmin is greater than Fmax, a negative value is returned.
# Because the data type is unsigned integers, negative values roll over, resulting in nonsensical values
# Wherever Fmin is greater than Fmax, set Fv to zero
fv[np.where(fmax_mask < fmin_mask)] = 0
analysis_images = []
# Calculate Fv/Fm (Fvariable / Fmax) where Fmax is greater than zero
# By definition above, wherever Fmax is zero, Fvariable will also be zero
# To calculate the divisions properly we need to change from uint16 to float64 data types
fvfm = fv.astype(np.float64)
analysis_images.append(fvfm)
fmax_flt = fmax_mask.astype(np.float64)
fvfm[np.where(fmax_mask > 0)] /= fmax_flt[np.where(fmax_mask > 0)]
# Calculate the median Fv/Fm value for non-zero pixels
fvfm_median = np.median(fvfm[np.where(fvfm > 0)])
# Calculate the histogram of Fv/Fm non-zero values
fvfm_hist, fvfm_bins = np.histogram(fvfm[np.where(fvfm > 0)], bins, range=(0, 1))
# fvfm_bins is a bins + 1 length list of bin endpoints, so we need to calculate bin midpoints so that
# we have a one-to-one list of x (FvFm) and y (frequency) values.
# To do this we add half the bin width to each lower bin edge x-value
midpoints = fvfm_bins[:-1] + 0.5 * np.diff(fvfm_bins)
# Calculate which non-zero bin has the maximum Fv/Fm value
max_bin = midpoints[np.argmax(fvfm_hist)]
# Create a dataframe
dataset = pd.DataFrame({'Plant Pixels': fvfm_hist, 'Fv/Fm': midpoints})
# Make the histogram figure using plotnine
fvfm_hist_fig = (ggplot(data=dataset, mapping=aes(x='Fv/Fm', y='Plant Pixels'))
+ geom_line(color='green', show_legend=True)
+ geom_label(label='Peak Bin Value: ' + str(max_bin),
x=.15, y=205, size=8, color='green'))
analysis_images.append(fvfm_hist_fig)
if params.debug == 'print':
print_image(fmin_mask, os.path.join(params.debug_outdir, str(params.device) + '_fmin_mask.png'))
print_image(fmax_mask, os.path.join(params.debug_outdir, str(params.device) + '_fmax_mask.png'))
print_image(fv, os.path.join(params.debug_outdir, str(params.device) + '_fv_convert.png'))
fvfm_hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_fv_hist.png'), verbose=False)
elif params.debug == 'plot':
plot_image(fmin_mask, cmap='gray')
plot_image(fmax_mask, cmap='gray')
plot_image(fv, cmap='gray')
print(fvfm_hist_fig)
outputs.add_observation(sample=label, variable='fvfm_hist', trait='Fv/Fm frequencies',
method='plantcv.plantcv.fluor_fvfm', scale='none', datatype=list,
value=fvfm_hist.tolist(), label=np.around(midpoints, decimals=len(str(bins))).tolist())
outputs.add_observation(sample=label, variable='fvfm_hist_peak', trait='peak Fv/Fm value',
method='plantcv.plantcv.fluor_fvfm', scale='none', datatype=float,
value=float(max_bin), label='none')
outputs.add_observation(sample=label, variable='fvfm_median', trait='Fv/Fm median',
method='plantcv.plantcv.fluor_fvfm', scale='none', datatype=float,
value=float(np.around(fvfm_median, decimals=4)), label='none')
outputs.add_observation(sample=label, variable='fdark_passed_qc', trait='Fdark passed QC',
method='plantcv.plantcv.fluor_fvfm', scale='none', datatype=bool,
value=qc_fdark, label='none')
# Store images
outputs.images.append(analysis_images)
return analysis_images
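# ---------------------------------------------------------------------------
# Minimal usage sketch, guarded so it never runs on import. The image paths
# and the fixed mask threshold are placeholder assumptions for illustration
# only; a real pipeline would derive the plant mask from an upstream step.
if __name__ == "__main__":
    # Load the dark-adapted, minimum and maximum fluorescence frames as
    # single-channel grayscale images, as required by the checks above.
    fdark_img = cv2.imread("fdark.png", cv2.IMREAD_GRAYSCALE)
    fmin_img = cv2.imread("fmin.png", cv2.IMREAD_GRAYSCALE)
    fmax_img = cv2.imread("fmax.png", cv2.IMREAD_GRAYSCALE)
    # Build a rough binary plant mask from the Fmax frame (threshold assumed).
    _, plant_mask = cv2.threshold(fmax_img, 20, 255, cv2.THRESH_BINARY)
    # Returns [Fv/Fm image, plotnine histogram figure]; summary statistics are
    # also registered in the module-level `outputs` object.
    images = analyze_fvfm(fdark_img, fmin_img, fmax_img, plant_mask,
                          bins=256, label="plant1")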
| mit |
linearregression/mpld3 | mpld3/__init__.py | 20 | 1109 | """
Interactive D3 rendering of matplotlib images
=============================================
Functions: General Use
----------------------
:func:`fig_to_html`
convert a figure to an html string
:func:`fig_to_dict`
convert a figure to a dictionary representation
:func:`show`
launch a web server to view a d3/html figure representation
:func:`save_html`
save a figure to an html file
:func:`save_json`
save a JSON representation of a figure to file
Functions: IPython Notebook
---------------------------
:func:`display`
display a figure in an IPython notebook
:func:`enable_notebook`
enable automatic D3 display of figures in the IPython notebook.
:func:`disable_notebook`
disable automatic D3 display of figures in the IPython notebook.
"""
__all__ = ["__version__",
"fig_to_html", "fig_to_dict", "fig_to_d3", "display_d3",
"display", "show_d3", "show", "save_html", "save_json",
"enable_notebook", "disable_notebook", "plugins", "urls"]
from .__about__ import __version__
from . import plugins
from . import urls
from ._display import *
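# ---------------------------------------------------------------------------
# Minimal usage sketch of the public API listed in __all__, written the way a
# downstream script would use it and guarded so it never runs on import.
# The figure content and output file name are arbitrary placeholders.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import mpld3

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9], marker="o")
    html = mpld3.fig_to_html(fig)        # standalone HTML/D3 string for embedding
    mpld3.save_html(fig, "figure.html")  # write the same representation to disk
    mpld3.show(fig)                      # serve the figure on a local web server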
| bsd-3-clause |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/neighbors/base.py | 1 | 24541 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import safe_asarray, atleast2d_or_csr, check_arrays
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = atleast2d_or_csr(X, copy=False)
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
j = np.arange(neigh_ind.shape[0])[:, None]
neigh_ind = neigh_ind[j, np.argsort(dist[j, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
return result
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = safe_asarray(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
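# One extra neighbor is requested and the leading column dropped: when X
# is the fitted data itself, each sample comes back as its own nearest
# neighbor at distance zero, so this strips that trivial self-edge.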
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point or points
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the euclidean distances to each point,
only present if return_distance=True.
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = [np.where(d < radius)[0] for d in dist]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.effective_metric_ == 'euclidean':
dist = np.array([np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]]
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
ind, dist = results
return dist, ind
else:
return results
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = safe_asarray(X)
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_neighbors = np.array([len(a) for a in A_ind])
n_nonzero = np.sum(n_neighbors)
if A_data is None:
A_data = np.ones(n_nonzero)
A_ind = np.concatenate(list(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_arrays(X, y, sparse_format="csr")
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_arrays(X, y, sparse_format="csr")
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| apache-2.0 |
Vimos/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
pico12/trading-with-python | sandbox/spreadCalculations.py | 78 | 1496 | '''
Created on 28 okt 2011
@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os
symbols = ['SPY','IWM']
y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)
df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)
res = readBiggerScreener('CointPairs.csv')
#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp
#The dollar-neutral ratio is about 1 * SPY - 1.7 * IWM. You will get the spread = zero (or probably very near zero)
#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()
#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')
#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()
quote = yahooFinance.getQuote(symbols)
print quote
s = Spread(symbols,histClose=df, estimateBeta = False)
s.setLast(quote['last'])
s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()
fig = plt.figure(2)
s.plot()
| bsd-3-clause |
XiaoxiaoLiu/morphology_analysis | bigneuron/reestimate_radius.py | 1 | 1506 | __author__ = 'xiaoxiaol'
# run standardize swc to make sure swc files have one single root, and sorted, and has the valide type id ( 1~4)
import matplotlib.pyplot as plt
import seaborn as sb
import os
import os.path as path
import numpy as np
import pandas as pd
import platform
import sys
import glob
if (platform.system() == "Linux"):
WORK_PATH = "/local1/xiaoxiaol/work"
else:
WORK_PATH = "/Users/xiaoxiaoliu/work"
p = WORK_PATH + '/src/morphology_analysis'
sys.path.append(p)
import bigneuron.recon_prescreening as rp
import bigneuron.plot_distances as plt_dist
import blast_neuron.blast_neuron_comp as bn
### main
data_DIR = "/data/mat/xiaoxiaol/data/big_neuron/silver/0401_gold163_all_soma_sort"
output_dir = data_DIR
#run_consensus(data_DIR, output_dir)
os.system("rm "+data_DIR+"/qsub2/*.qsub")
os.system("rm "+data_DIR+"/qsub2/*.o*")
for item in os.listdir(data_DIR):
folder_name = os.path.join(data_DIR, item)
if os.path.isdir(folder_name):
print folder_name
imagefile = glob.glob(folder_name+'/*.v3dpbd')
imagefile.extend(glob.glob(folder_name+'/*.v3draw'))
files =glob.glob(folder_name+'/*.strict.swc')
if len(files)>0 and len(imagefile)>0:
gs_swc_file =files[0]
if not os.path.exists(gs_swc_file+".out.swc"):
bn.estimate_radius(input_image=imagefile[0], input_swc_path=gs_swc_file,bg_th=40, GEN_QSUB = 0, qsub_script_dir= output_dir+"/qsub2", id=None)
| gpl-3.0 |
siutanwong/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information-theoretic evaluation scores: as they are based only on
cluster assignments rather than distances, they are not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
COL-IU/XLSearch | xlsearch_train.py | 1 | 5042 | import sys
import pickle
import os
import getopt
from time import ctime
import numpy as np
usage = '''
USAGE: python xlsearch_train.py -l [path to xlsearch library]
-p [parameter file]
-o [output file]'''
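# Example invocation (all paths below are placeholders):
#   python xlsearch_train.py -l ./XLSearch/library -p parameters.txt -o models.txt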
(pairs, args) = getopt.getopt(sys.argv[1:], 'l:p:o:')
cmd_arg = dict()
for i in range(len(pairs)):
cmd_arg[pairs[i][0]] = pairs[i][1]
if len(cmd_arg) != 3:
print usage
sys.exit(1)
lib_path = cmd_arg['-l']
param_file = cmd_arg['-p']
output_file = cmd_arg['-o']
sys.path.append(lib_path)
from utility import *
from index import EnumIndexBuilder
from fastareader import FastaReader
print 'XLSearch, version 1.0'
print 'Copyright of School of Informatics and Computing, Indiana University'
print 'Current time %s' % ctime()
print 'Training logistic regression models using authentic true-true PSMs...'
print '\nReading parameters from: %s...' % param_file
[param, mass] = read_param(param_file)
param['ntermxlink'] = False
param['neutral_loss']['h2o_loss']['aa'] = set('DEST')
param['neutral_loss']['nh3_loss']['aa'] = set('KNQR')
param['neutral_loss']['h2o_gain']['aa'] = set()
mass['C'] = 103.009184
print 'Reading parameters done!'
print '\nReading MSMS spectra files from directory: %s...' % param['ms_data']
spec_dict = read_spec(param['ms_data'], param, mass)
pickle.dump(spec_dict, file('spectra.pickle', 'w'))
print 'Total number of spectra: %d' % len(spec_dict)
print 'Reading MSMS spectra files done!'
print '\nDeisotoping MSMS spectra...'
spec_dict = pickle.load(file('spectra.pickle'))
deisotoped = dict()
titles = spec_dict.keys()
for i in range(len(titles)):
title = titles[i]
(one, align) = spec_dict[title].deisotope(mass, 4, 0.02)
deisotoped[title] = one
pickle.dump(deisotoped, file('deisotoped.pickle', 'w'))
deisotoped = pickle.load(file('deisotoped.pickle'))
spec_dict = deisotoped
print 'Deisotoping MSMS spectra done!'
print 'Current time %s' % ctime()
print '\nBuilding index for all possible inter-peptide cross-links...'
index = EnumIndexBuilder(param['target_database'], spec_dict, mass, param)
pickle.dump(index, file('index.pickle', 'w'))
index = pickle.load(file('index.pickle'))
print 'Building index done!'
print 'Current time %s' % ctime()
print '\nComputing features for candidate PSMs for query spectra...'
results = []
titles = []
for title in index.search_index.keys():
if len(index.search_index[title]) != 0:
titles.append(title)
length = len(titles)
for i in range(0, length):
print '%d / %d' % (i, length)
sys.stdout.flush()
title = titles[i]
result = get_matches_per_spec(mass, param, index, title)
result = [title, result]
results.append(result)
print 'Computing features done!\n'
print 'Current time: %s' % ctime()
pickle.dump(results, file('results.pickle', 'w'))
results = pickle.load(file('results.pickle'))
print 'Extracting authentic true-true PSMs...'
true_true = get_true_true(results, index, param, mass)
pickle.dump(true_true, file('TT.pickle', 'w'))
print 'Extracting authentic true-true PSMs done!'
print 'Extracting true-false PSMs based on true-true PSMs as seeds...'
true_false = get_true_false(true_true, param, mass)
pickle.dump(true_false, file('TF.pickle', 'w'))
print 'Extracting true-false PSMs done!'
print 'Extracting false-false PSMs based on true-true PSMs as seeds...'
false_false = get_false_false(true_true, param, mass)
pickle.dump(false_false, file('FF.pickle', 'w'))
print 'Extracting false-false PSMs done!'
print 'Computing feature matrix for true-true, true-false, false-false PSMs...'
X_true_true = get_feature_matrix(true_true)
X_true_false = get_feature_matrix(true_false)
X_false_false = get_feature_matrix(false_false)
X_TT_TF = np.concatenate((X_true_true, X_true_false), axis = 0)
y_TT_TF = []
y_TT_TF.extend([1.0] * len(true_true))
y_TT_TF.extend([0.0] * len(true_false))
y_TT_TF = np.asarray(y_TT_TF)
y_TT_TF = y_TT_TF.T
X_TF_FF = np.concatenate((X_true_false, X_false_false), axis = 0)
y_TF_FF = []
y_TF_FF.extend([1.0] * len(true_false))
y_TF_FF.extend([0.0] * len(false_false))
y_TF_FF = np.asarray(y_TF_FF)
y_TF_FF = y_TF_FF.T
print 'Computing features done!'
from sklearn import linear_model
log_reg = linear_model.LogisticRegression()
log_reg.fit(X_TT_TF, y_TT_TF)
model_TT_TF = []
model_TT_TF.extend(log_reg.intercept_.tolist())
model_TT_TF.extend(log_reg.coef_[0].tolist())
log_reg = linear_model.LogisticRegression()
log_reg.fit(X_TF_FF, y_TF_FF)
model_TF_FF = []
model_TF_FF.extend(log_reg.intercept_.tolist())
model_TF_FF.extend(log_reg.coef_[0].tolist())
f = open(output_file, 'w')
f.write('# Classifier I (TT-TF) coefficients\n')
for i in range(len(model_TT_TF)):
f.write('CI%02d\t' % i)
f.write('%.60f\n' % model_TT_TF[i])
f.write('# Classifier II (TF-FF) coefficients\n')
for i in range(len(model_TF_FF)):
f.write('CII%02d\t' % i)
f.write('%.60f\n' % model_TF_FF[i])
f.write('nTT\t%d\n' % len(true_true))
f.write('nTF\t%d\n' % len(true_false))
f.write('nFF\t%d\n' % len(false_false))
f.close()
print 'XLSearch train mode finished!'
| mit |
Sumith1896/sympy | sympy/utilities/runtests.py | 4 | 78928 | """
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
from inspect import isgeneratorfunction
from sympy.core.cache import clear_cache
from sympy.core.compatibility import exec_, PY3, string_types, range
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
IS_WINDOWS = (os.name == 'nt')
class Skipped(Exception):
pass
import __future__
# add more flags ??
future_flags = __future__.division.compiler_flag
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in ``s``, and return the result.
If the string ``s`` is Unicode, it is encoded using the stdout
encoding and the ``backslashreplace`` error handler.
"""
# After a 2to3 run the below code is bogus, so wrap it with a version check
if not PY3:
if isinstance(s, unicode):
s = s.encode(pdoctest._encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
pdoctest._indent = _indent
# override reporter to maintain windows and python3
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
if PY3 and IS_WINDOWS:
DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
"""
Converts a list of '/' separated paths into a list of
native (os.sep separated) paths and converts to lowercase
if the system is case insensitive.
"""
newlst = []
for i, rv in enumerate(lst):
rv = os.path.join(*rv.split("/"))
# on windows the slash after the colon is dropped
if sys.platform == "win32":
pos = rv.find(':')
if pos != -1:
if rv[pos + 1] != '\\':
rv = rv[:pos + 1] + '\\' + rv[pos + 1:]
newlst.append(sys_normcase(rv))
return newlst
def get_sympy_dir():
"""
Returns the root sympy directory and sets the global value
indicating whether the system is case sensitive or not.
"""
global sys_case_insensitive
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
sys_case_insensitive = (os.path.isdir(sympy_dir) and
os.path.isdir(sympy_dir.lower()) and
os.path.isdir(sympy_dir.upper()))
return sys_normcase(sympy_dir)
def sys_normcase(f):
if sys_case_insensitive: # global defined after call to get_sympy_dir()
return f.lower()
return f
def setup_pprint():
from sympy import pprint_use_unicode, init_printing
# force pprint to be in ascii mode in doctests
pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
init_printing(pretty_print=False)
def run_in_subprocess_with_hash_randomization(function, function_args=(),
function_kwargs={}, command=sys.executable,
module='sympy.utilities.runtests', force=False):
"""
Run a function in a Python subprocess with hash randomization enabled.
If hash randomization is not supported by the version of Python given, it
returns False. Otherwise, it returns the exit value of the command. The
function is passed to sys.exit(), so the return value of the function will
be the return value.
The environment variable PYTHONHASHSEED is used to seed Python's hash
randomization. If it is set, this function will return False, because
starting a new subprocess is unnecessary in that case. If it is not set,
one is set at random, and the tests are run. Note that if this
environment variable is set when Python starts, hash randomization is
automatically enabled. To force a subprocess to be created even if
PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a
subprocess in Python versions that do not support hash randomization (see
below), because those versions of Python do not support the ``-R`` flag.
``function`` should be a string name of a function that is importable from
the module ``module``, like "_test". The default for ``module`` is
"sympy.utilities.runtests". ``function_args`` and ``function_kwargs``
should be a repr-able tuple and dict, respectively. The default Python
command is sys.executable, which is the currently running Python command.
This function is necessary because the seed for hash randomization must be
set by the environment variable before Python starts. Hence, in order to
use a predetermined seed for tests, we must start Python in a separate
subprocess.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
Examples
========
>>> from sympy.utilities.runtests import (
... run_in_subprocess_with_hash_randomization)
>>> # run the core tests in verbose mode
>>> run_in_subprocess_with_hash_randomization("_test",
... function_args=("core",),
... function_kwargs={'verbose': True}) # doctest: +SKIP
# Will return 0 if sys.executable supports hash randomization and tests
# pass, 1 if they fail, and False if it does not support hash
# randomization.
"""
# Note, we must return False everywhere, not None, as subprocess.call will
# sometimes return None.
# First check if the Python version supports hash randomization
# If it doesn't have this support, it won't recognize the -R flag
p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode != 0:
return False
hash_seed = os.getenv("PYTHONHASHSEED")
if not hash_seed:
os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
else:
if not force:
return False
# Now run the command
commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
(module, function, function, repr(function_args),
repr(function_kwargs)))
try:
p = subprocess.Popen([command, "-R", "-c", commandstring])
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
# Put the environment variable back, so that it reads correctly for
# the current Python process.
if hash_seed is None:
del os.environ["PYTHONHASHSEED"]
else:
os.environ["PYTHONHASHSEED"] = hash_seed
return p.returncode
def run_all_tests(test_args=(), test_kwargs={}, doctest_args=(),
doctest_kwargs={}, examples_args=(), examples_kwargs={'quiet': True}):
"""
Run all tests.
Right now, this runs the regular tests (bin/test), the doctests
(bin/doctest), the examples (examples/all.py), and the sage tests (see
sympy/external/tests/test_sage.py).
This is what ``setup.py test`` uses.
You can pass arguments and keyword arguments to the test functions that
support them (for now, test, doctest, and the examples). See the
docstrings of those functions for a description of the available options.
For example, to run the solvers tests with colors turned off:
>>> from sympy.utilities.runtests import run_all_tests
>>> run_all_tests(test_args=("solvers",),
... test_kwargs={"colors": False}) # doctest: +SKIP
"""
tests_successful = True
try:
# Regular tests
if not test(*test_args, **test_kwargs):
# some regular test fails, so set the tests_successful
# flag to false and continue running the doctests
tests_successful = False
# Doctests
print()
if not doctest(*doctest_args, **doctest_kwargs):
tests_successful = False
# Examples
print()
sys.path.append("examples")
from all import run_examples # examples/all.py
if not run_examples(*examples_args, **examples_kwargs):
tests_successful = False
# Sage tests
if not (sys.platform == "win32" or PY3):
# run Sage tests; Sage currently doesn't support Windows or Python 3
dev_null = open(os.devnull, 'w')
if subprocess.call("sage -v", shell=True, stdout=dev_null,
stderr=dev_null) == 0:
if subprocess.call("sage -python bin/test "
"sympy/external/tests/test_sage.py", shell=True) != 0:
tests_successful = False
if tests_successful:
return
else:
# Return nonzero exit code
sys.exit(1)
except KeyboardInterrupt:
print()
print("DO *NOT* COMMIT!")
sys.exit(1)
def test(*paths, **kwargs):
"""
Run tests in the specified test_*.py files.
Tests in a particular test_*.py file are run if any of the given strings
in ``paths`` matches a part of the test file's path. If ``paths=[]``,
tests in all test_*.py files are run.
Notes:
- If sort=False, tests are run in random order (not default).
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
**Explanation of test results**
====== ===============================================================
Output Meaning
====== ===============================================================
. passed
F failed
X XPassed (expected to fail but passed)
f XFAILed (expected to fail and indeed failed)
s skipped
w slow
T timeout (e.g., when ``--timeout`` is used)
K KeyboardInterrupt (when running the slow tests with ``--slow``,
you can interrupt one of them without killing the test runner)
====== ===============================================================
Colors have no additional meaning and are used just to facilitate
interpreting the output.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.test() # doctest: +SKIP
Run one file:
>>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP
>>> sympy.test("_basic") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... "sympy/functions") # doctest: +SKIP
Run all tests in sympy/core and sympy/utilities:
>>> sympy.test("/core", "/util") # doctest: +SKIP
Run specific test from a file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... kw="test_equality") # doctest: +SKIP
Run specific test from any file:
>>> sympy.test(kw="subs") # doctest: +SKIP
Run the tests with verbose mode on:
>>> sympy.test(verbose=True) # doctest: +SKIP
Don't sort the test output:
>>> sympy.test(sort=False) # doctest: +SKIP
Turn on post-mortem pdb:
>>> sympy.test(pdb=True) # doctest: +SKIP
Turn off colors:
>>> sympy.test(colors=False) # doctest: +SKIP
Force colors, even when the output is not to a terminal (this is useful,
e.g., if you are piping to ``less -r`` and you still want colors)
>>> sympy.test(force_colors=True) # doctest: +SKIP
The traceback verboseness can be set to "short" or "no" (default is
"short")
>>> sympy.test(tb='no') # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. For instance, to run the first half of the test suite:
>>> sympy.test(split='1/2') # doctest: +SKIP
You can disable running the tests in a separate subprocess using
``subprocess=False``. This is done to support seeding hash randomization,
which is enabled by default in the Python versions where it is supported.
If subprocess=False, hash randomization is enabled/disabled according to
whether it has been enabled or not in the calling Python process.
However, even if it is enabled, the seed cannot be printed unless it is
called from a new Python process.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
If hash randomization is not supported ``subprocess=False`` is used
automatically.
>>> sympy.test(subprocess=False) # doctest: +SKIP
To set the hash randomization seed, set the environment variable
``PYTHONHASHSEED`` before running the tests. This can be done from within
Python using
>>> import os
>>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP
Or from the command line using
$ PYTHONHASHSEED=42 ./bin/test
If the seed is not set, a random seed will be chosen.
Note that to reproduce the same hash values, you must use both the same seed
and the same architecture (32-bit vs. 64-bit).
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_test",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_test(*paths, **kwargs))
if not val or i == 0:
return val
def _test(*paths, **kwargs):
"""
Internal function that actually runs the tests.
    All keyword arguments from ``test()`` are passed to this function except
    for ``subprocess`` and ``rerun``.
Returns 0 if tests passed and 1 if they failed. See the docstring of
``test()`` for more information.
"""
verbose = kwargs.get("verbose", False)
tb = kwargs.get("tb", "short")
kw = kwargs.get("kw", None) or ()
# ensure that kw is a tuple
if isinstance(kw, str):
kw = (kw, )
post_mortem = kwargs.get("pdb", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
sort = kwargs.get("sort", True)
seed = kwargs.get("seed", None)
if seed is None:
seed = random.randrange(100000000)
timeout = kwargs.get("timeout", False)
slow = kwargs.get("slow", False)
enhance_asserts = kwargs.get("enhance_asserts", False)
split = kwargs.get('split', None)
blacklist = kwargs.get('blacklist', [])
blacklist = convert_to_native_paths(blacklist)
r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
force_colors=force_colors, split=split)
t = SymPyTests(r, kw, post_mortem, seed)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
test_files = t.get_test_files('sympy')
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
return int(not t.test(sort=sort, timeout=timeout,
slow=slow, enhance_asserts=enhance_asserts))
def doctest(*paths, **kwargs):
"""
Runs doctests in all \*.py files in the sympy directory which match
any of the given strings in ``paths`` or all tests if paths=[].
Notes:
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.doctest() # doctest: +SKIP
Run one file:
>>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
>>> sympy.doctest("polynomial.rst") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP
Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:
>>> sympy.doctest("polynomial") # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. Note that the regular doctests and the Sphinx
doctests are split independently. For instance, to run the first half of
the test suite:
>>> sympy.doctest(split='1/2') # doctest: +SKIP
The ``subprocess`` and ``verbose`` options are the same as with the function
``test()``. See the docstring of that function for more information.
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_doctest",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_doctest(*paths, **kwargs))
if not val or i == 0:
return val
def _doctest(*paths, **kwargs):
"""
Internal function that actually runs the doctests.
    All keyword arguments from ``doctest()`` are passed to this function
    except for ``subprocess`` and ``rerun``.
Returns 0 if tests passed and 1 if they failed. See the docstrings of
``doctest()`` and ``test()`` for more information.
"""
normal = kwargs.get("normal", False)
verbose = kwargs.get("verbose", False)
blacklist = kwargs.get("blacklist", [])
split = kwargs.get('split', None)
blacklist.extend([
"doc/src/modules/plotting.rst", # generates live plots
"sympy/utilities/compilef.py", # needs tcc
"sympy/physics/gaussopt.py", # raises deprecation warning
])
if import_module('numpy') is None:
blacklist.extend([
"sympy/plotting/experimental_lambdify.py",
"sympy/plotting/plot_implicit.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py",
"examples/intermediate/sample.py",
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py",
"doc/src/modules/numeric-computation.rst"
])
else:
if import_module('matplotlib') is None:
blacklist.extend([
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py"
])
else:
# don't display matplotlib windows
from sympy.plotting.plot import unset_show
unset_show()
if import_module('pyglet') is None:
blacklist.extend(["sympy/plotting/pygletplot"])
if import_module('theano') is None:
blacklist.extend(["doc/src/modules/numeric-computation.rst"])
# disabled because of doctest failures in asmeurer's bot
blacklist.extend([
"sympy/utilities/autowrap.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py"
])
# blacklist these modules until issue 4840 is resolved
blacklist.extend([
"sympy/conftest.py",
"sympy/utilities/benchmarking.py"
])
blacklist = convert_to_native_paths(blacklist)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
r = PyTestReporter(verbose, split=split)
t = SymPyDocTests(r, normal)
test_files = t.get_test_files('sympy')
test_files.extend(t.get_test_files('examples', init_only=False))
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# take only what was requested...but not blacklisted items
# and allow for partial match anywhere or fnmatch of name
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
# run the tests and record the result for this *py portion of the tests
if t._testfiles:
failed = not t.test()
else:
failed = False
# N.B.
# --------------------------------------------------------------------
# Here we test *.rst files at or below doc/src. Code from these must
# be self supporting in terms of imports since there is no importing
# of necessary modules by doctest.testfile. If you try to pass *.py
# files through this they might fail because they will lack the needed
# imports and smarter parsing that can be done with source code.
#
test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
test_files.sort()
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# Take only what was requested as long as it's not on the blacklist.
# Paths were already made native in *py tests so don't repeat here.
# There's no chance of having a *py file slip through since we
# only have *rst files in test_files.
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
setup_pprint()
first_report = True
for rst_file in matched:
if not os.path.isfile(rst_file):
continue
old_displayhook = sys.displayhook
try:
out = sympytestfile(
rst_file, module_relative=False, encoding='utf-8',
optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
finally:
# make sure we return to the original displayhook in case some
# doctest has changed that
sys.displayhook = old_displayhook
rstfailed, tested = out
if tested:
failed = rstfailed or failed
if first_report:
first_report = False
msg = 'rst doctests start'
if not t._testfiles:
r.start(msg=msg)
else:
r.write_center(msg)
print()
# use as the id, everything past the first 'sympy'
file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
print(file_id, end=" ")
            # get at least the name out so it is known who is being tested
wid = r.terminal_width - len(file_id) - 1 # update width
test_file = '[%s]' % (tested)
report = '[%s]' % (rstfailed or 'OK')
print(''.join(
[test_file, ' '*(wid - len(test_file) - len(report)), report])
)
    # The doctests for the *.py files will have printed this message already
    # if there was a failure, so now only print it if there was intervening
    # reporting from the *.rst tests, as evidenced by first_report no longer
    # being True.
if not first_report and failed:
print()
print("DO *NOT* COMMIT!")
return int(failed)
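# Pattern for the ``split`` option: matches strings of the form 'a/b', where
# the first group is the part to run and the second the total number of parts
# (which may not start with 0).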
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def split_list(l, split):
"""
Splits a list into part a of b
split should be a string of the form 'a/b'. For instance, '1/3' would give
the split one of three.
If the length of the list is not divisible by the number of splits, the
last split will have more items.
>>> from sympy.utilities.runtests import split_list
>>> a = list(range(10))
>>> split_list(a, '1/3')
[0, 1, 2]
>>> split_list(a, '2/3')
[3, 4, 5]
>>> split_list(a, '3/3')
[6, 7, 8, 9]
"""
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b where a and b are ints")
i, t = map(int, m.groups())
return l[(i - 1)*len(l)//t:i*len(l)//t]
from collections import namedtuple
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False,
parser=pdoctest.DocTestParser(), encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg ``module_relative`` specifies how filenames
should be interpreted:
- If ``module_relative`` is True (the default), then ``filename``
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
``package`` argument is specified, then it is relative to that
package. To ensure os-independence, ``filename`` should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If ``module_relative`` is False, then ``filename`` specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg ``name`` gives the name of the test; by default
use the file's basename.
Optional keyword argument ``package`` is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify ``package`` if ``module_relative`` is False.
Optional keyword arg ``globs`` gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg ``extraglobs`` gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg ``verbose`` prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg ``report`` prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg ``optionflags`` or's together module constants,
and defaults to 0. Possible values (see the docs for details):
- DONT_ACCEPT_TRUE_FOR_1
- DONT_ACCEPT_BLANKLINE
- NORMALIZE_WHITESPACE
- ELLIPSIS
- SKIP
- IGNORE_EXCEPTION_DETAIL
- REPORT_UDIFF
- REPORT_CDIFF
- REPORT_NDIFF
- REPORT_ONLY_FIRST_FAILURE
Optional keyword arg ``raise_on_error`` raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg ``parser`` specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg ``encoding`` specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if not PY3:
text, filename = pdoctest._load_testfile(
filename, package, module_relative)
if encoding is not None:
text = text.decode(encoding)
else:
text, filename = pdoctest._load_testfile(
filename, package, module_relative, encoding)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
runner._checker = SymPyOutputChecker()
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test, compileflags=future_flags)
if report:
runner.summarize()
if pdoctest.master is None:
pdoctest.master = runner
else:
pdoctest.master.merge(runner)
return SymPyTestResults(runner.failures, runner.tries)
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False,
seed=None):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._testfiles = []
self._seed = seed if seed is not None else random.random()
def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False):
"""
        Runs the tests, returning True if all tests pass, otherwise False.
        If sort=False, run the tests in random order.
"""
if sort:
self._testfiles.sort()
else:
from random import shuffle
random.seed(self._seed)
shuffle(self._testfiles)
self._reporter.start(self._seed)
for f in self._testfiles:
try:
self.test_file(f, sort, timeout, slow, enhance_asserts)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def _enhance_asserts(self, source):
from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
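        # The AST transform below rewrites ``assert <a> <op> <b>`` into an
        # assignment of the compared values to temporaries plus an assert
        # whose message interpolates those values, so a failing assert
        # reports the actual operands.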
ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
"Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
"In": 'in', "NotIn": 'not in'}
class Transform(NodeTransformer):
def visit_Assert(self, stmt):
if isinstance(stmt.test, Compare):
compare = stmt.test
values = [compare.left] + compare.comparators
names = [ "_%s" % i for i, _ in enumerate(values) ]
names_store = [ Name(n, Store()) for n in names ]
names_load = [ Name(n, Load()) for n in names ]
target = Tuple(names_store, Store())
value = Tuple(values, Load())
assign = Assign([target], value)
new_compare = Compare(names_load[0], compare.ops, names_load[1:])
msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
return [assign, test]
else:
return stmt
tree = parse(source)
new_tree = Transform().visit(tree)
return fix_missing_locations(new_tree)
def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False):
funcs = []
try:
gl = {'__file__': filename}
try:
if PY3:
open_file = lambda: open(filename, encoding="utf8")
else:
open_file = lambda: open(filename)
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec")
exec_(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
clear_cache()
self._count += 1
random.seed(self._seed)
pytestfile = ""
if "XFAIL" in gl:
pytestfile = inspect.getsourcefile(gl["XFAIL"])
pytestfile2 = ""
if "slow" in gl:
pytestfile2 = inspect.getsourcefile(gl["slow"])
disabled = gl.get("disabled", False)
if not disabled:
                # we need to filter only those functions that begin with
                # 'test_' that are defined in the testing file or in the file
                # where the XFAIL decorator is defined
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile or
inspect.getsourcefile(gl[f]) == pytestfile2)]
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if isgeneratorfunction(funcs[i]):
                        # some tests can be generators that return the actual
                        # test functions; we unpack them below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
                            # bind current func/args; a plain closure would
                            # see only the last yielded values (late binding)
                            fgw = lambda func=func, args=args: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
self._reporter.entering_filename(filename, len(funcs))
raise
self._reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
self._reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
if timeout:
self._timeout(f, timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
self._reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
                    # Disable the alarm; the exception prevented _timeout
                    # from clearing it.
                    signal.alarm(0)
t, v, tr = sys.exc_info()
if t is AssertionError:
self._reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
self._reporter.test_skip(v)
elif t.__name__ == "XFail":
self._reporter.test_xfail()
elif t.__name__ == "XPass":
self._reporter.test_xpass(v)
else:
self._reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
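    # _timeout relies on SIGALRM, which is only available on POSIX platforms;
    # a test exceeding ``timeout`` seconds is reported as skipped ("Timeout").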
def _timeout(self, function, timeout):
def callback(x, y):
signal.alarm(0)
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the sympy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([sys_normcase(gi) for gi in g])
class SymPyDocTests(object):
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from sympy.core.compatibility import StringIO
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
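        # Turn the path relative to the sympy root into a dotted module name
        # (dropping the trailing ".py").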
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
            # Example files do not have __init__.py files,
            # so we have to temporarily extend sys.path to import them.
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
setup_pprint()
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
        # By default tests are sorted alphabetically by function name. We sort
        # by line number so one can edit the file sequentially from bottom to
        # top. However, if there are decorated functions, their line numbers
        # will be too large and for now one must just search for these by text
        # and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
if not self._process_dependencies(test.globs['_doctest_depends_on']):
self._reporter.test_skip()
continue
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
            # If the testing is normal, the doctests get importing magic to
            # provide the global namespace. If not normal (the default) then
            # they must run on their own; all imports must be explicit within
            # a function's docstring. Once imported, that import will be
            # available to the rest of the tests in a given function's
            # docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
                # if this is uncommented then all that the tests would get is
                # what comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
"""
Returns the list of \*.py files (default) from which docstrings
will be tested which are at or below directory ``dir``. By default,
only those that have an __init__.py in their parent directory
and do not start with ``test_`` will be included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [sys_normcase(gi) for gi in g]
def _process_dependencies(self, deps):
"""
        Returns ``False`` if some dependencies are not met and the test should
        be skipped, otherwise returns ``True``.
"""
executables = deps.get('exe', None)
moduledeps = deps.get('modules', None)
viewers = deps.get('disable_viewers', None)
pyglet = deps.get('pyglet', None)
# print deps
if executables is not None:
for ex in executables:
found = find_executable(ex)
if found is None:
return False
if moduledeps is not None:
for extmod in moduledeps:
if extmod == 'matplotlib':
matplotlib = import_module(
'matplotlib',
__import__kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is not None:
pass
else:
return False
else:
# TODO min version support
mod = import_module(extmod)
if mod is not None:
version = "unknown"
if hasattr(mod, '__version__'):
version = mod.__version__
else:
return False
if viewers is not None:
import tempfile
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
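            # Write a do-nothing stub script for every disabled viewer into
            # the temporary directory prepended to PATH, so doctests that
            # would spawn an external viewer hit the stub instead of a real
            # program.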
if PY3:
vw = '#!/usr/bin/env python3\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
else:
vw = '#!/usr/bin/env python\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
for viewer in viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
if pyglet:
            # monkey-patch pyglet so that it does not open a window during
            # doctesting
import pyglet
class DummyWindow(object):
def __init__(self, *args, **kwargs):
self.has_exit=True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
return True
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
    Modified from doctest's version by looking harder for code in the
    case that it looks like the code comes from a different module.
    In the case of decorated functions (e.g. @vectorize) they appear
    to come from a different module (e.g. multidimensional) even though
    their code is not there.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
                    # Make sure we don't run doctests for functions or classes
                    # from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, string_types):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, string_types)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if (inspect.isfunction(val) or
inspect.isclass(val) or
isinstance(val, property)):
                    # Make sure we don't run doctests for functions or classes
                    # from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, string_types):
            # obj is a string in the case of objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall("line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
            # NOTE: this is not the exact line number, but it's better than no
            # lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, string_types):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
# handling of properties is not implemented in _find_lineno so do
# it here
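            # Decorated functions are closures (Python 2 ``func_closure``);
            # the code assumes the wrapped object sits in the first closure
            # cell and uses it for the line-number lookup.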
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
if hasattr(obj, '_doctest_depends_on'):
globs['_doctest_depends_on'] = obj._doctest_depends_on
else:
globs['_doctest_depends_on'] = {}
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
DocTestRunner._DocTestRunner__record_outcome
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
    Compared to the OutputChecker from the stdlib, our OutputChecker class
    supports numerical comparison of floats occurring in the output of
    doctest examples.
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
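        # The regexes below pick out floats delimited by separators such as
        # whitespace, signs, '*' and ',' (plus 'j'/'e' on the right); the
        # "want" variant additionally allows a trailing '...' ellipsis.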
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
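            # Rebuild ``got`` with the expected values substituted for the
            # matched floats (keeping the actual value where the expectation
            # used '...'), so the purely textual comparisons below succeed
            # when the numbers differ only within the 1e-5 tolerance.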
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
class Reporter(object):
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
Py.test like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
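            # Try the Windows console API first; on other platforms parse the
            # output of ``stty -a``. Pipes and any failure fall back to the
            # default width.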
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
try:
width = int(columns)
except ValueError:
pass
if width != 0:
return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
Prints a text on the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
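        # ANSI SGR escape sequences: '\033[0m' resets attributes and
        # '\033[<code>m' applies one of the color codes defined above.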
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
# the stdout is not a terminal, this for example happens if the
# output is piped to less, e.g. "bin/test | less". In this case,
# the terminal control sequences would be printed verbatim, so
# don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if PY3 and IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
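        # Track the cursor column (and whether the line wrapped) so that
        # right-aligned writes know how much padding to insert.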
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
t = traceback.extract_tb(tb)
# remove the first item, as that is always runtests.py
t = t[1:]
t = traceback.format_list(t)
self.write("".join(t))
t = traceback.format_exception_only(e, val)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from .misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
if seed is not None:
self.write("random seed: %d\n" % seed)
from .misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
        def add_text(mytext):
            """Break new text if too long."""
            global text, linelen
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
self.write(char, "Blue")
if self._verbose:
self.write(" - ", "Blue")
if v is not None:
self.write(message, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
sympy_dir = get_sympy_dir()
| bsd-3-clause |
gauravmm/Remote-Temperature-Monitor | utilities/colormap/colormaps.py | 28 | 50518 | # New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt,
# and (in the case of viridis) Eric Firing.
#
# This file and the colormaps in it are released under the CC0 license /
# public domain dedication. We would appreciate credit if you use or
# redistribute these colormaps, but do not impose any legal restrictions.
#
# To the extent possible under law, the persons who associated CC0 with
# mpl-colormaps have waived all copyright and related or neighboring rights
# to mpl-colormaps.
#
# You should have received a copy of the CC0 legalcode along with this
# work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
__all__ = ['magma', 'inferno', 'plasma', 'viridis']
_magma_data = [[0.001462, 0.000466, 0.013866],
[0.002258, 0.001295, 0.018331],
[0.003279, 0.002305, 0.023708],
[0.004512, 0.003490, 0.029965],
[0.005950, 0.004843, 0.037130],
[0.007588, 0.006356, 0.044973],
[0.009426, 0.008022, 0.052844],
[0.011465, 0.009828, 0.060750],
[0.013708, 0.011771, 0.068667],
[0.016156, 0.013840, 0.076603],
[0.018815, 0.016026, 0.084584],
[0.021692, 0.018320, 0.092610],
[0.024792, 0.020715, 0.100676],
[0.028123, 0.023201, 0.108787],
[0.031696, 0.025765, 0.116965],
[0.035520, 0.028397, 0.125209],
[0.039608, 0.031090, 0.133515],
[0.043830, 0.033830, 0.141886],
[0.048062, 0.036607, 0.150327],
[0.052320, 0.039407, 0.158841],
[0.056615, 0.042160, 0.167446],
[0.060949, 0.044794, 0.176129],
[0.065330, 0.047318, 0.184892],
[0.069764, 0.049726, 0.193735],
[0.074257, 0.052017, 0.202660],
[0.078815, 0.054184, 0.211667],
[0.083446, 0.056225, 0.220755],
[0.088155, 0.058133, 0.229922],
[0.092949, 0.059904, 0.239164],
[0.097833, 0.061531, 0.248477],
[0.102815, 0.063010, 0.257854],
[0.107899, 0.064335, 0.267289],
[0.113094, 0.065492, 0.276784],
[0.118405, 0.066479, 0.286321],
[0.123833, 0.067295, 0.295879],
[0.129380, 0.067935, 0.305443],
[0.135053, 0.068391, 0.315000],
[0.140858, 0.068654, 0.324538],
[0.146785, 0.068738, 0.334011],
[0.152839, 0.068637, 0.343404],
[0.159018, 0.068354, 0.352688],
[0.165308, 0.067911, 0.361816],
[0.171713, 0.067305, 0.370771],
[0.178212, 0.066576, 0.379497],
[0.184801, 0.065732, 0.387973],
[0.191460, 0.064818, 0.396152],
[0.198177, 0.063862, 0.404009],
[0.204935, 0.062907, 0.411514],
[0.211718, 0.061992, 0.418647],
[0.218512, 0.061158, 0.425392],
[0.225302, 0.060445, 0.431742],
[0.232077, 0.059889, 0.437695],
[0.238826, 0.059517, 0.443256],
[0.245543, 0.059352, 0.448436],
[0.252220, 0.059415, 0.453248],
[0.258857, 0.059706, 0.457710],
[0.265447, 0.060237, 0.461840],
[0.271994, 0.060994, 0.465660],
[0.278493, 0.061978, 0.469190],
[0.284951, 0.063168, 0.472451],
[0.291366, 0.064553, 0.475462],
[0.297740, 0.066117, 0.478243],
[0.304081, 0.067835, 0.480812],
[0.310382, 0.069702, 0.483186],
[0.316654, 0.071690, 0.485380],
[0.322899, 0.073782, 0.487408],
[0.329114, 0.075972, 0.489287],
[0.335308, 0.078236, 0.491024],
[0.341482, 0.080564, 0.492631],
[0.347636, 0.082946, 0.494121],
[0.353773, 0.085373, 0.495501],
[0.359898, 0.087831, 0.496778],
[0.366012, 0.090314, 0.497960],
[0.372116, 0.092816, 0.499053],
[0.378211, 0.095332, 0.500067],
[0.384299, 0.097855, 0.501002],
[0.390384, 0.100379, 0.501864],
[0.396467, 0.102902, 0.502658],
[0.402548, 0.105420, 0.503386],
[0.408629, 0.107930, 0.504052],
[0.414709, 0.110431, 0.504662],
[0.420791, 0.112920, 0.505215],
[0.426877, 0.115395, 0.505714],
[0.432967, 0.117855, 0.506160],
[0.439062, 0.120298, 0.506555],
[0.445163, 0.122724, 0.506901],
[0.451271, 0.125132, 0.507198],
[0.457386, 0.127522, 0.507448],
[0.463508, 0.129893, 0.507652],
[0.469640, 0.132245, 0.507809],
[0.475780, 0.134577, 0.507921],
[0.481929, 0.136891, 0.507989],
[0.488088, 0.139186, 0.508011],
[0.494258, 0.141462, 0.507988],
[0.500438, 0.143719, 0.507920],
[0.506629, 0.145958, 0.507806],
[0.512831, 0.148179, 0.507648],
[0.519045, 0.150383, 0.507443],
[0.525270, 0.152569, 0.507192],
[0.531507, 0.154739, 0.506895],
[0.537755, 0.156894, 0.506551],
[0.544015, 0.159033, 0.506159],
[0.550287, 0.161158, 0.505719],
[0.556571, 0.163269, 0.505230],
[0.562866, 0.165368, 0.504692],
[0.569172, 0.167454, 0.504105],
[0.575490, 0.169530, 0.503466],
[0.581819, 0.171596, 0.502777],
[0.588158, 0.173652, 0.502035],
[0.594508, 0.175701, 0.501241],
[0.600868, 0.177743, 0.500394],
[0.607238, 0.179779, 0.499492],
[0.613617, 0.181811, 0.498536],
[0.620005, 0.183840, 0.497524],
[0.626401, 0.185867, 0.496456],
[0.632805, 0.187893, 0.495332],
[0.639216, 0.189921, 0.494150],
[0.645633, 0.191952, 0.492910],
[0.652056, 0.193986, 0.491611],
[0.658483, 0.196027, 0.490253],
[0.664915, 0.198075, 0.488836],
[0.671349, 0.200133, 0.487358],
[0.677786, 0.202203, 0.485819],
[0.684224, 0.204286, 0.484219],
[0.690661, 0.206384, 0.482558],
[0.697098, 0.208501, 0.480835],
[0.703532, 0.210638, 0.479049],
[0.709962, 0.212797, 0.477201],
[0.716387, 0.214982, 0.475290],
[0.722805, 0.217194, 0.473316],
[0.729216, 0.219437, 0.471279],
[0.735616, 0.221713, 0.469180],
[0.742004, 0.224025, 0.467018],
[0.748378, 0.226377, 0.464794],
[0.754737, 0.228772, 0.462509],
[0.761077, 0.231214, 0.460162],
[0.767398, 0.233705, 0.457755],
[0.773695, 0.236249, 0.455289],
[0.779968, 0.238851, 0.452765],
[0.786212, 0.241514, 0.450184],
[0.792427, 0.244242, 0.447543],
[0.798608, 0.247040, 0.444848],
[0.804752, 0.249911, 0.442102],
[0.810855, 0.252861, 0.439305],
[0.816914, 0.255895, 0.436461],
[0.822926, 0.259016, 0.433573],
[0.828886, 0.262229, 0.430644],
[0.834791, 0.265540, 0.427671],
[0.840636, 0.268953, 0.424666],
[0.846416, 0.272473, 0.421631],
[0.852126, 0.276106, 0.418573],
[0.857763, 0.279857, 0.415496],
[0.863320, 0.283729, 0.412403],
[0.868793, 0.287728, 0.409303],
[0.874176, 0.291859, 0.406205],
[0.879464, 0.296125, 0.403118],
[0.884651, 0.300530, 0.400047],
[0.889731, 0.305079, 0.397002],
[0.894700, 0.309773, 0.393995],
[0.899552, 0.314616, 0.391037],
[0.904281, 0.319610, 0.388137],
[0.908884, 0.324755, 0.385308],
[0.913354, 0.330052, 0.382563],
[0.917689, 0.335500, 0.379915],
[0.921884, 0.341098, 0.377376],
[0.925937, 0.346844, 0.374959],
[0.929845, 0.352734, 0.372677],
[0.933606, 0.358764, 0.370541],
[0.937221, 0.364929, 0.368567],
[0.940687, 0.371224, 0.366762],
[0.944006, 0.377643, 0.365136],
[0.947180, 0.384178, 0.363701],
[0.950210, 0.390820, 0.362468],
[0.953099, 0.397563, 0.361438],
[0.955849, 0.404400, 0.360619],
[0.958464, 0.411324, 0.360014],
[0.960949, 0.418323, 0.359630],
[0.963310, 0.425390, 0.359469],
[0.965549, 0.432519, 0.359529],
[0.967671, 0.439703, 0.359810],
[0.969680, 0.446936, 0.360311],
[0.971582, 0.454210, 0.361030],
[0.973381, 0.461520, 0.361965],
[0.975082, 0.468861, 0.363111],
[0.976690, 0.476226, 0.364466],
[0.978210, 0.483612, 0.366025],
[0.979645, 0.491014, 0.367783],
[0.981000, 0.498428, 0.369734],
[0.982279, 0.505851, 0.371874],
[0.983485, 0.513280, 0.374198],
[0.984622, 0.520713, 0.376698],
[0.985693, 0.528148, 0.379371],
[0.986700, 0.535582, 0.382210],
[0.987646, 0.543015, 0.385210],
[0.988533, 0.550446, 0.388365],
[0.989363, 0.557873, 0.391671],
[0.990138, 0.565296, 0.395122],
[0.990871, 0.572706, 0.398714],
[0.991558, 0.580107, 0.402441],
[0.992196, 0.587502, 0.406299],
[0.992785, 0.594891, 0.410283],
[0.993326, 0.602275, 0.414390],
[0.993834, 0.609644, 0.418613],
[0.994309, 0.616999, 0.422950],
[0.994738, 0.624350, 0.427397],
[0.995122, 0.631696, 0.431951],
[0.995480, 0.639027, 0.436607],
[0.995810, 0.646344, 0.441361],
[0.996096, 0.653659, 0.446213],
[0.996341, 0.660969, 0.451160],
[0.996580, 0.668256, 0.456192],
[0.996775, 0.675541, 0.461314],
[0.996925, 0.682828, 0.466526],
[0.997077, 0.690088, 0.471811],
[0.997186, 0.697349, 0.477182],
[0.997254, 0.704611, 0.482635],
[0.997325, 0.711848, 0.488154],
[0.997351, 0.719089, 0.493755],
[0.997351, 0.726324, 0.499428],
[0.997341, 0.733545, 0.505167],
[0.997285, 0.740772, 0.510983],
[0.997228, 0.747981, 0.516859],
[0.997138, 0.755190, 0.522806],
[0.997019, 0.762398, 0.528821],
[0.996898, 0.769591, 0.534892],
[0.996727, 0.776795, 0.541039],
[0.996571, 0.783977, 0.547233],
[0.996369, 0.791167, 0.553499],
[0.996162, 0.798348, 0.559820],
[0.995932, 0.805527, 0.566202],
[0.995680, 0.812706, 0.572645],
[0.995424, 0.819875, 0.579140],
[0.995131, 0.827052, 0.585701],
[0.994851, 0.834213, 0.592307],
[0.994524, 0.841387, 0.598983],
[0.994222, 0.848540, 0.605696],
[0.993866, 0.855711, 0.612482],
[0.993545, 0.862859, 0.619299],
[0.993170, 0.870024, 0.626189],
[0.992831, 0.877168, 0.633109],
[0.992440, 0.884330, 0.640099],
[0.992089, 0.891470, 0.647116],
[0.991688, 0.898627, 0.654202],
[0.991332, 0.905763, 0.661309],
[0.990930, 0.912915, 0.668481],
[0.990570, 0.920049, 0.675675],
[0.990175, 0.927196, 0.682926],
[0.989815, 0.934329, 0.690198],
[0.989434, 0.941470, 0.697519],
[0.989077, 0.948604, 0.704863],
[0.988717, 0.955742, 0.712242],
[0.988367, 0.962878, 0.719649],
[0.988033, 0.970012, 0.727077],
[0.987691, 0.977154, 0.734536],
[0.987387, 0.984288, 0.742002],
[0.987053, 0.991438, 0.749504]]
_inferno_data = [[0.001462, 0.000466, 0.013866],
[0.002267, 0.001270, 0.018570],
[0.003299, 0.002249, 0.024239],
[0.004547, 0.003392, 0.030909],
[0.006006, 0.004692, 0.038558],
[0.007676, 0.006136, 0.046836],
[0.009561, 0.007713, 0.055143],
[0.011663, 0.009417, 0.063460],
[0.013995, 0.011225, 0.071862],
[0.016561, 0.013136, 0.080282],
[0.019373, 0.015133, 0.088767],
[0.022447, 0.017199, 0.097327],
[0.025793, 0.019331, 0.105930],
[0.029432, 0.021503, 0.114621],
[0.033385, 0.023702, 0.123397],
[0.037668, 0.025921, 0.132232],
[0.042253, 0.028139, 0.141141],
[0.046915, 0.030324, 0.150164],
[0.051644, 0.032474, 0.159254],
[0.056449, 0.034569, 0.168414],
[0.061340, 0.036590, 0.177642],
[0.066331, 0.038504, 0.186962],
[0.071429, 0.040294, 0.196354],
[0.076637, 0.041905, 0.205799],
[0.081962, 0.043328, 0.215289],
[0.087411, 0.044556, 0.224813],
[0.092990, 0.045583, 0.234358],
[0.098702, 0.046402, 0.243904],
[0.104551, 0.047008, 0.253430],
[0.110536, 0.047399, 0.262912],
[0.116656, 0.047574, 0.272321],
[0.122908, 0.047536, 0.281624],
[0.129285, 0.047293, 0.290788],
[0.135778, 0.046856, 0.299776],
[0.142378, 0.046242, 0.308553],
[0.149073, 0.045468, 0.317085],
[0.155850, 0.044559, 0.325338],
[0.162689, 0.043554, 0.333277],
[0.169575, 0.042489, 0.340874],
[0.176493, 0.041402, 0.348111],
[0.183429, 0.040329, 0.354971],
[0.190367, 0.039309, 0.361447],
[0.197297, 0.038400, 0.367535],
[0.204209, 0.037632, 0.373238],
[0.211095, 0.037030, 0.378563],
[0.217949, 0.036615, 0.383522],
[0.224763, 0.036405, 0.388129],
[0.231538, 0.036405, 0.392400],
[0.238273, 0.036621, 0.396353],
[0.244967, 0.037055, 0.400007],
[0.251620, 0.037705, 0.403378],
[0.258234, 0.038571, 0.406485],
[0.264810, 0.039647, 0.409345],
[0.271347, 0.040922, 0.411976],
[0.277850, 0.042353, 0.414392],
[0.284321, 0.043933, 0.416608],
[0.290763, 0.045644, 0.418637],
[0.297178, 0.047470, 0.420491],
[0.303568, 0.049396, 0.422182],
[0.309935, 0.051407, 0.423721],
[0.316282, 0.053490, 0.425116],
[0.322610, 0.055634, 0.426377],
[0.328921, 0.057827, 0.427511],
[0.335217, 0.060060, 0.428524],
[0.341500, 0.062325, 0.429425],
[0.347771, 0.064616, 0.430217],
[0.354032, 0.066925, 0.430906],
[0.360284, 0.069247, 0.431497],
[0.366529, 0.071579, 0.431994],
[0.372768, 0.073915, 0.432400],
[0.379001, 0.076253, 0.432719],
[0.385228, 0.078591, 0.432955],
[0.391453, 0.080927, 0.433109],
[0.397674, 0.083257, 0.433183],
[0.403894, 0.085580, 0.433179],
[0.410113, 0.087896, 0.433098],
[0.416331, 0.090203, 0.432943],
[0.422549, 0.092501, 0.432714],
[0.428768, 0.094790, 0.432412],
[0.434987, 0.097069, 0.432039],
[0.441207, 0.099338, 0.431594],
[0.447428, 0.101597, 0.431080],
[0.453651, 0.103848, 0.430498],
[0.459875, 0.106089, 0.429846],
[0.466100, 0.108322, 0.429125],
[0.472328, 0.110547, 0.428334],
[0.478558, 0.112764, 0.427475],
[0.484789, 0.114974, 0.426548],
[0.491022, 0.117179, 0.425552],
[0.497257, 0.119379, 0.424488],
[0.503493, 0.121575, 0.423356],
[0.509730, 0.123769, 0.422156],
[0.515967, 0.125960, 0.420887],
[0.522206, 0.128150, 0.419549],
[0.528444, 0.130341, 0.418142],
[0.534683, 0.132534, 0.416667],
[0.540920, 0.134729, 0.415123],
[0.547157, 0.136929, 0.413511],
[0.553392, 0.139134, 0.411829],
[0.559624, 0.141346, 0.410078],
[0.565854, 0.143567, 0.408258],
[0.572081, 0.145797, 0.406369],
[0.578304, 0.148039, 0.404411],
[0.584521, 0.150294, 0.402385],
[0.590734, 0.152563, 0.400290],
[0.596940, 0.154848, 0.398125],
[0.603139, 0.157151, 0.395891],
[0.609330, 0.159474, 0.393589],
[0.615513, 0.161817, 0.391219],
[0.621685, 0.164184, 0.388781],
[0.627847, 0.166575, 0.386276],
[0.633998, 0.168992, 0.383704],
[0.640135, 0.171438, 0.381065],
[0.646260, 0.173914, 0.378359],
[0.652369, 0.176421, 0.375586],
[0.658463, 0.178962, 0.372748],
[0.664540, 0.181539, 0.369846],
[0.670599, 0.184153, 0.366879],
[0.676638, 0.186807, 0.363849],
[0.682656, 0.189501, 0.360757],
[0.688653, 0.192239, 0.357603],
[0.694627, 0.195021, 0.354388],
[0.700576, 0.197851, 0.351113],
[0.706500, 0.200728, 0.347777],
[0.712396, 0.203656, 0.344383],
[0.718264, 0.206636, 0.340931],
[0.724103, 0.209670, 0.337424],
[0.729909, 0.212759, 0.333861],
[0.735683, 0.215906, 0.330245],
[0.741423, 0.219112, 0.326576],
[0.747127, 0.222378, 0.322856],
[0.752794, 0.225706, 0.319085],
[0.758422, 0.229097, 0.315266],
[0.764010, 0.232554, 0.311399],
[0.769556, 0.236077, 0.307485],
[0.775059, 0.239667, 0.303526],
[0.780517, 0.243327, 0.299523],
[0.785929, 0.247056, 0.295477],
[0.791293, 0.250856, 0.291390],
[0.796607, 0.254728, 0.287264],
[0.801871, 0.258674, 0.283099],
[0.807082, 0.262692, 0.278898],
[0.812239, 0.266786, 0.274661],
[0.817341, 0.270954, 0.270390],
[0.822386, 0.275197, 0.266085],
[0.827372, 0.279517, 0.261750],
[0.832299, 0.283913, 0.257383],
[0.837165, 0.288385, 0.252988],
[0.841969, 0.292933, 0.248564],
[0.846709, 0.297559, 0.244113],
[0.851384, 0.302260, 0.239636],
[0.855992, 0.307038, 0.235133],
[0.860533, 0.311892, 0.230606],
[0.865006, 0.316822, 0.226055],
[0.869409, 0.321827, 0.221482],
[0.873741, 0.326906, 0.216886],
[0.878001, 0.332060, 0.212268],
[0.882188, 0.337287, 0.207628],
[0.886302, 0.342586, 0.202968],
[0.890341, 0.347957, 0.198286],
[0.894305, 0.353399, 0.193584],
[0.898192, 0.358911, 0.188860],
[0.902003, 0.364492, 0.184116],
[0.905735, 0.370140, 0.179350],
[0.909390, 0.375856, 0.174563],
[0.912966, 0.381636, 0.169755],
[0.916462, 0.387481, 0.164924],
[0.919879, 0.393389, 0.160070],
[0.923215, 0.399359, 0.155193],
[0.926470, 0.405389, 0.150292],
[0.929644, 0.411479, 0.145367],
[0.932737, 0.417627, 0.140417],
[0.935747, 0.423831, 0.135440],
[0.938675, 0.430091, 0.130438],
[0.941521, 0.436405, 0.125409],
[0.944285, 0.442772, 0.120354],
[0.946965, 0.449191, 0.115272],
[0.949562, 0.455660, 0.110164],
[0.952075, 0.462178, 0.105031],
[0.954506, 0.468744, 0.099874],
[0.956852, 0.475356, 0.094695],
[0.959114, 0.482014, 0.089499],
[0.961293, 0.488716, 0.084289],
[0.963387, 0.495462, 0.079073],
[0.965397, 0.502249, 0.073859],
[0.967322, 0.509078, 0.068659],
[0.969163, 0.515946, 0.063488],
[0.970919, 0.522853, 0.058367],
[0.972590, 0.529798, 0.053324],
[0.974176, 0.536780, 0.048392],
[0.975677, 0.543798, 0.043618],
[0.977092, 0.550850, 0.039050],
[0.978422, 0.557937, 0.034931],
[0.979666, 0.565057, 0.031409],
[0.980824, 0.572209, 0.028508],
[0.981895, 0.579392, 0.026250],
[0.982881, 0.586606, 0.024661],
[0.983779, 0.593849, 0.023770],
[0.984591, 0.601122, 0.023606],
[0.985315, 0.608422, 0.024202],
[0.985952, 0.615750, 0.025592],
[0.986502, 0.623105, 0.027814],
[0.986964, 0.630485, 0.030908],
[0.987337, 0.637890, 0.034916],
[0.987622, 0.645320, 0.039886],
[0.987819, 0.652773, 0.045581],
[0.987926, 0.660250, 0.051750],
[0.987945, 0.667748, 0.058329],
[0.987874, 0.675267, 0.065257],
[0.987714, 0.682807, 0.072489],
[0.987464, 0.690366, 0.079990],
[0.987124, 0.697944, 0.087731],
[0.986694, 0.705540, 0.095694],
[0.986175, 0.713153, 0.103863],
[0.985566, 0.720782, 0.112229],
[0.984865, 0.728427, 0.120785],
[0.984075, 0.736087, 0.129527],
[0.983196, 0.743758, 0.138453],
[0.982228, 0.751442, 0.147565],
[0.981173, 0.759135, 0.156863],
[0.980032, 0.766837, 0.166353],
[0.978806, 0.774545, 0.176037],
[0.977497, 0.782258, 0.185923],
[0.976108, 0.789974, 0.196018],
[0.974638, 0.797692, 0.206332],
[0.973088, 0.805409, 0.216877],
[0.971468, 0.813122, 0.227658],
[0.969783, 0.820825, 0.238686],
[0.968041, 0.828515, 0.249972],
[0.966243, 0.836191, 0.261534],
[0.964394, 0.843848, 0.273391],
[0.962517, 0.851476, 0.285546],
[0.960626, 0.859069, 0.298010],
[0.958720, 0.866624, 0.310820],
[0.956834, 0.874129, 0.323974],
[0.954997, 0.881569, 0.337475],
[0.953215, 0.888942, 0.351369],
[0.951546, 0.896226, 0.365627],
[0.950018, 0.903409, 0.380271],
[0.948683, 0.910473, 0.395289],
[0.947594, 0.917399, 0.410665],
[0.946809, 0.924168, 0.426373],
[0.946392, 0.930761, 0.442367],
[0.946403, 0.937159, 0.458592],
[0.946903, 0.943348, 0.474970],
[0.947937, 0.949318, 0.491426],
[0.949545, 0.955063, 0.507860],
[0.951740, 0.960587, 0.524203],
[0.954529, 0.965896, 0.540361],
[0.957896, 0.971003, 0.556275],
[0.961812, 0.975924, 0.571925],
[0.966249, 0.980678, 0.587206],
[0.971162, 0.985282, 0.602154],
[0.976511, 0.989753, 0.616760],
[0.982257, 0.994109, 0.631017],
[0.988362, 0.998364, 0.644924]]
_plasma_data = [[0.050383, 0.029803, 0.527975],
[0.063536, 0.028426, 0.533124],
[0.075353, 0.027206, 0.538007],
[0.086222, 0.026125, 0.542658],
[0.096379, 0.025165, 0.547103],
[0.105980, 0.024309, 0.551368],
[0.115124, 0.023556, 0.555468],
[0.123903, 0.022878, 0.559423],
[0.132381, 0.022258, 0.563250],
[0.140603, 0.021687, 0.566959],
[0.148607, 0.021154, 0.570562],
[0.156421, 0.020651, 0.574065],
[0.164070, 0.020171, 0.577478],
[0.171574, 0.019706, 0.580806],
[0.178950, 0.019252, 0.584054],
[0.186213, 0.018803, 0.587228],
[0.193374, 0.018354, 0.590330],
[0.200445, 0.017902, 0.593364],
[0.207435, 0.017442, 0.596333],
[0.214350, 0.016973, 0.599239],
[0.221197, 0.016497, 0.602083],
[0.227983, 0.016007, 0.604867],
[0.234715, 0.015502, 0.607592],
[0.241396, 0.014979, 0.610259],
[0.248032, 0.014439, 0.612868],
[0.254627, 0.013882, 0.615419],
[0.261183, 0.013308, 0.617911],
[0.267703, 0.012716, 0.620346],
[0.274191, 0.012109, 0.622722],
[0.280648, 0.011488, 0.625038],
[0.287076, 0.010855, 0.627295],
[0.293478, 0.010213, 0.629490],
[0.299855, 0.009561, 0.631624],
[0.306210, 0.008902, 0.633694],
[0.312543, 0.008239, 0.635700],
[0.318856, 0.007576, 0.637640],
[0.325150, 0.006915, 0.639512],
[0.331426, 0.006261, 0.641316],
[0.337683, 0.005618, 0.643049],
[0.343925, 0.004991, 0.644710],
[0.350150, 0.004382, 0.646298],
[0.356359, 0.003798, 0.647810],
[0.362553, 0.003243, 0.649245],
[0.368733, 0.002724, 0.650601],
[0.374897, 0.002245, 0.651876],
[0.381047, 0.001814, 0.653068],
[0.387183, 0.001434, 0.654177],
[0.393304, 0.001114, 0.655199],
[0.399411, 0.000859, 0.656133],
[0.405503, 0.000678, 0.656977],
[0.411580, 0.000577, 0.657730],
[0.417642, 0.000564, 0.658390],
[0.423689, 0.000646, 0.658956],
[0.429719, 0.000831, 0.659425],
[0.435734, 0.001127, 0.659797],
[0.441732, 0.001540, 0.660069],
[0.447714, 0.002080, 0.660240],
[0.453677, 0.002755, 0.660310],
[0.459623, 0.003574, 0.660277],
[0.465550, 0.004545, 0.660139],
[0.471457, 0.005678, 0.659897],
[0.477344, 0.006980, 0.659549],
[0.483210, 0.008460, 0.659095],
[0.489055, 0.010127, 0.658534],
[0.494877, 0.011990, 0.657865],
[0.500678, 0.014055, 0.657088],
[0.506454, 0.016333, 0.656202],
[0.512206, 0.018833, 0.655209],
[0.517933, 0.021563, 0.654109],
[0.523633, 0.024532, 0.652901],
[0.529306, 0.027747, 0.651586],
[0.534952, 0.031217, 0.650165],
[0.540570, 0.034950, 0.648640],
[0.546157, 0.038954, 0.647010],
[0.551715, 0.043136, 0.645277],
[0.557243, 0.047331, 0.643443],
[0.562738, 0.051545, 0.641509],
[0.568201, 0.055778, 0.639477],
[0.573632, 0.060028, 0.637349],
[0.579029, 0.064296, 0.635126],
[0.584391, 0.068579, 0.632812],
[0.589719, 0.072878, 0.630408],
[0.595011, 0.077190, 0.627917],
[0.600266, 0.081516, 0.625342],
[0.605485, 0.085854, 0.622686],
[0.610667, 0.090204, 0.619951],
[0.615812, 0.094564, 0.617140],
[0.620919, 0.098934, 0.614257],
[0.625987, 0.103312, 0.611305],
[0.631017, 0.107699, 0.608287],
[0.636008, 0.112092, 0.605205],
[0.640959, 0.116492, 0.602065],
[0.645872, 0.120898, 0.598867],
[0.650746, 0.125309, 0.595617],
[0.655580, 0.129725, 0.592317],
[0.660374, 0.134144, 0.588971],
[0.665129, 0.138566, 0.585582],
[0.669845, 0.142992, 0.582154],
[0.674522, 0.147419, 0.578688],
[0.679160, 0.151848, 0.575189],
[0.683758, 0.156278, 0.571660],
[0.688318, 0.160709, 0.568103],
[0.692840, 0.165141, 0.564522],
[0.697324, 0.169573, 0.560919],
[0.701769, 0.174005, 0.557296],
[0.706178, 0.178437, 0.553657],
[0.710549, 0.182868, 0.550004],
[0.714883, 0.187299, 0.546338],
[0.719181, 0.191729, 0.542663],
[0.723444, 0.196158, 0.538981],
[0.727670, 0.200586, 0.535293],
[0.731862, 0.205013, 0.531601],
[0.736019, 0.209439, 0.527908],
[0.740143, 0.213864, 0.524216],
[0.744232, 0.218288, 0.520524],
[0.748289, 0.222711, 0.516834],
[0.752312, 0.227133, 0.513149],
[0.756304, 0.231555, 0.509468],
[0.760264, 0.235976, 0.505794],
[0.764193, 0.240396, 0.502126],
[0.768090, 0.244817, 0.498465],
[0.771958, 0.249237, 0.494813],
[0.775796, 0.253658, 0.491171],
[0.779604, 0.258078, 0.487539],
[0.783383, 0.262500, 0.483918],
[0.787133, 0.266922, 0.480307],
[0.790855, 0.271345, 0.476706],
[0.794549, 0.275770, 0.473117],
[0.798216, 0.280197, 0.469538],
[0.801855, 0.284626, 0.465971],
[0.805467, 0.289057, 0.462415],
[0.809052, 0.293491, 0.458870],
[0.812612, 0.297928, 0.455338],
[0.816144, 0.302368, 0.451816],
[0.819651, 0.306812, 0.448306],
[0.823132, 0.311261, 0.444806],
[0.826588, 0.315714, 0.441316],
[0.830018, 0.320172, 0.437836],
[0.833422, 0.324635, 0.434366],
[0.836801, 0.329105, 0.430905],
[0.840155, 0.333580, 0.427455],
[0.843484, 0.338062, 0.424013],
[0.846788, 0.342551, 0.420579],
[0.850066, 0.347048, 0.417153],
[0.853319, 0.351553, 0.413734],
[0.856547, 0.356066, 0.410322],
[0.859750, 0.360588, 0.406917],
[0.862927, 0.365119, 0.403519],
[0.866078, 0.369660, 0.400126],
[0.869203, 0.374212, 0.396738],
[0.872303, 0.378774, 0.393355],
[0.875376, 0.383347, 0.389976],
[0.878423, 0.387932, 0.386600],
[0.881443, 0.392529, 0.383229],
[0.884436, 0.397139, 0.379860],
[0.887402, 0.401762, 0.376494],
[0.890340, 0.406398, 0.373130],
[0.893250, 0.411048, 0.369768],
[0.896131, 0.415712, 0.366407],
[0.898984, 0.420392, 0.363047],
[0.901807, 0.425087, 0.359688],
[0.904601, 0.429797, 0.356329],
[0.907365, 0.434524, 0.352970],
[0.910098, 0.439268, 0.349610],
[0.912800, 0.444029, 0.346251],
[0.915471, 0.448807, 0.342890],
[0.918109, 0.453603, 0.339529],
[0.920714, 0.458417, 0.336166],
[0.923287, 0.463251, 0.332801],
[0.925825, 0.468103, 0.329435],
[0.928329, 0.472975, 0.326067],
[0.930798, 0.477867, 0.322697],
[0.933232, 0.482780, 0.319325],
[0.935630, 0.487712, 0.315952],
[0.937990, 0.492667, 0.312575],
[0.940313, 0.497642, 0.309197],
[0.942598, 0.502639, 0.305816],
[0.944844, 0.507658, 0.302433],
[0.947051, 0.512699, 0.299049],
[0.949217, 0.517763, 0.295662],
[0.951344, 0.522850, 0.292275],
[0.953428, 0.527960, 0.288883],
[0.955470, 0.533093, 0.285490],
[0.957469, 0.538250, 0.282096],
[0.959424, 0.543431, 0.278701],
[0.961336, 0.548636, 0.275305],
[0.963203, 0.553865, 0.271909],
[0.965024, 0.559118, 0.268513],
[0.966798, 0.564396, 0.265118],
[0.968526, 0.569700, 0.261721],
[0.970205, 0.575028, 0.258325],
[0.971835, 0.580382, 0.254931],
[0.973416, 0.585761, 0.251540],
[0.974947, 0.591165, 0.248151],
[0.976428, 0.596595, 0.244767],
[0.977856, 0.602051, 0.241387],
[0.979233, 0.607532, 0.238013],
[0.980556, 0.613039, 0.234646],
[0.981826, 0.618572, 0.231287],
[0.983041, 0.624131, 0.227937],
[0.984199, 0.629718, 0.224595],
[0.985301, 0.635330, 0.221265],
[0.986345, 0.640969, 0.217948],
[0.987332, 0.646633, 0.214648],
[0.988260, 0.652325, 0.211364],
[0.989128, 0.658043, 0.208100],
[0.989935, 0.663787, 0.204859],
[0.990681, 0.669558, 0.201642],
[0.991365, 0.675355, 0.198453],
[0.991985, 0.681179, 0.195295],
[0.992541, 0.687030, 0.192170],
[0.993032, 0.692907, 0.189084],
[0.993456, 0.698810, 0.186041],
[0.993814, 0.704741, 0.183043],
[0.994103, 0.710698, 0.180097],
[0.994324, 0.716681, 0.177208],
[0.994474, 0.722691, 0.174381],
[0.994553, 0.728728, 0.171622],
[0.994561, 0.734791, 0.168938],
[0.994495, 0.740880, 0.166335],
[0.994355, 0.746995, 0.163821],
[0.994141, 0.753137, 0.161404],
[0.993851, 0.759304, 0.159092],
[0.993482, 0.765499, 0.156891],
[0.993033, 0.771720, 0.154808],
[0.992505, 0.777967, 0.152855],
[0.991897, 0.784239, 0.151042],
[0.991209, 0.790537, 0.149377],
[0.990439, 0.796859, 0.147870],
[0.989587, 0.803205, 0.146529],
[0.988648, 0.809579, 0.145357],
[0.987621, 0.815978, 0.144363],
[0.986509, 0.822401, 0.143557],
[0.985314, 0.828846, 0.142945],
[0.984031, 0.835315, 0.142528],
[0.982653, 0.841812, 0.142303],
[0.981190, 0.848329, 0.142279],
[0.979644, 0.854866, 0.142453],
[0.977995, 0.861432, 0.142808],
[0.976265, 0.868016, 0.143351],
[0.974443, 0.874622, 0.144061],
[0.972530, 0.881250, 0.144923],
[0.970533, 0.887896, 0.145919],
[0.968443, 0.894564, 0.147014],
[0.966271, 0.901249, 0.148180],
[0.964021, 0.907950, 0.149370],
[0.961681, 0.914672, 0.150520],
[0.959276, 0.921407, 0.151566],
[0.956808, 0.928152, 0.152409],
[0.954287, 0.934908, 0.152921],
[0.951726, 0.941671, 0.152925],
[0.949151, 0.948435, 0.152178],
[0.946602, 0.955190, 0.150328],
[0.944152, 0.961916, 0.146861],
[0.941896, 0.968590, 0.140956],
[0.940015, 0.975158, 0.131326]]
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
from matplotlib.colors import ListedColormap
cmaps = {}
for (name, data) in (('magma', _magma_data),
('inferno', _inferno_data),
('plasma', _plasma_data),
('viridis', _viridis_data)):
cmaps[name] = ListedColormap(data, name=name)
magma = cmaps['magma']
inferno = cmaps['inferno']
plasma = cmaps['plasma']
viridis = cmaps['viridis']
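# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demo, assuming only numpy and matplotlib are available: it renders
# each ListedColormap built above as a horizontal gradient strip.  The
# __main__ guard keeps the demo from running when the module is imported.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    gradient = np.linspace(0, 1, 256).reshape(1, -1)
    fig, axes = plt.subplots(nrows=len(cmaps), figsize=(6, 2))
    for ax, name in zip(axes, sorted(cmaps)):
        # imshow draws the 1 x 256 gradient using the colormap under test
        ax.imshow(gradient, aspect='auto', cmap=cmaps[name])
        ax.set_ylabel(name, rotation=0, ha='right', va='center', fontsize=8)
        ax.set_xticks([])
        ax.set_yticks([])
    plt.show()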
| mit |
meduz/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
timkpaine/lantern | tests/plot/test_plot.py | 1 | 1272 | from mock import patch
import matplotlib
matplotlib.use('Agg')
class TestConfig:
def setup(self):
pass
# setup() before each test method
def teardown(self):
pass
# teardown() after each test method
@classmethod
def setup_class(cls):
pass
# setup_class() before any methods in this class
@classmethod
def teardown_class(cls):
pass
# teardown_class() after any methods in this class
def test_all(self):
with patch('lantern.plotting.plot_matplotlib.in_ipynb', create=True) as mock1:
import lantern as l
mock1.return_value = True
df = l.bar()
l.plot(df, 'line', 'matplotlib')
def test_list(self):
with patch('lantern.plotting.plot_matplotlib.in_ipynb', create=True) as mock1:
import lantern as l
mock1.return_value = True
df = l.bar()
l.plot(df, ['line' for _ in df], 'matplotlib')
def test_dict(self):
with patch('lantern.plotting.plot_matplotlib.in_ipynb', create=True) as mock1:
import lantern as l
mock1.return_value = True
df = l.bar()
l.plot(df, {c: 'line' for c in df.columns}, 'matplotlib')
| apache-2.0 |
PrashntS/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 103 | 4394 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition
(dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) to the :ref:`olivetti_faces` dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
cbmoore/statsmodels | docs/source/plots/graphics_gofplots_qqplot.py | 38 | 1911 | # -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012
Author: Josef Perktold
edited by: Paul Hobson (2012-08-19)
"""
from scipy import stats
from matplotlib import pyplot as plt
import statsmodels.api as sm
#example from docstring
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8 #x coordinate for text insert
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# example with the new ProbPlot class
import numpy as np
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/event_handling/zoom_window.py | 1 | 2014 | """
===========
Zoom Window
===========
This example shows how to connect events in one window, for example, a mouse
press, to another figure window.
If you click on a point in the first window, the x and y limits of the
second will be adjusted so that the center of the zoom in the second
window will be the x,y coordinates of the clicked point.
Note that the diameter of the circles in the scatter plot is defined in
points**2, so their size is independent of the zoom.
"""
import matplotlib.pyplot as plt #import figure, show
import numpy as np
# nodebox section
if __name__ == '__builtin__':
    # we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
figsrc = plt.figure()
figzoom = plt.figure()
axsrc = figsrc.add_subplot(111, xlim=(0, 1), ylim=(0, 1), autoscale_on=False)
axzoom = figzoom.add_subplot(111, xlim=(0.45, 0.55), ylim=(0.4, .6),
autoscale_on=False)
axsrc.set_title('Click to zoom')
axzoom.set_title('zoom window')
x, y, s, c = np.random.rand(4, 200)
s *= 200
axsrc.scatter(x, y, s, c)
axzoom.scatter(x, y, s, c)
def onpress(event):
if event.button != 1:
return
x, y = event.xdata, event.ydata
axzoom.set_xlim(x - 0.1, x + 0.1)
axzoom.set_ylim(y - 0.1, y + 0.1)
figzoom.canvas.draw()
figsrc.canvas.mpl_connect('button_press_event', onpress)
pltshow(plt)
| mit |
cwu2011/seaborn | seaborn/timeseries.py | 6 | 13239 | """Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
err_style="ci_band", ci=68, interpolate=True, color=None,
estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
legend=True, ax=None, **kwargs):
"""Plot one or more timeseries with flexible representation of uncertainty.
This function can take data specified either as a long-form (tidy)
DataFrame or as an ndarray with dimensions for sampling unit, time, and
(optionally) condition. The interpretation of some of the other parameters
changes depending on the type of object passed as data.
Parameters
----------
data : DataFrame or ndarray
Data for the plot. Should either be a "long form" dataframe or an
array with dimensions (unit, time, condition). In both cases, the
condition field/dimension is optional. The type of this argument
determines the interpretation of the next few parameters.
time : string or series-like
Either the name of the field corresponding to time in the data
DataFrame or x values for a plot when data is an array. If a Series,
the name will be used to label the x axis.
unit : string
Field in the data DataFrame identifying the sampling unit (e.g.
subject, neuron, etc.). The error representation will collapse over
units at each time/condition observation. This has no role when data
is an array.
value : string
Either the name of the field corresponding to the data values in
the data DataFrame (i.e. the y coordinate) or a string that forms
the y axis label when data is an array.
condition : string or Series-like
Either the name of the field identifying the condition an observation
falls under in the data DataFrame, or a sequence of names with a length
equal to the size of the third dimension of data. There will be a
separate trace plotted for each condition. If condition is a Series
with a name attribute, the name will form the title for the plot
legend (unless legend is set to False).
err_style : string or list of strings or None
Names of ways to plot uncertainty across units from set of
{ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
Can use one or more than one method.
ci : float or list of floats in [0, 100]
        Confidence interval size(s). If a list, it will stack the error
plots for each confidence interval. Only relevant for error styles
with "ci" in the name.
interpolate : boolean
Whether to do a linear interpolation between each timepoint when
plotting. The value of this parameter also determines the marker
used for the main plot traces, unless marker is specified as a keyword
argument.
color : seaborn palette or matplotlib color name or dictionary
Palette or color for the main plots and error representation (unless
plotting by unit, which can be separately controlled with err_palette).
If a dictionary, should map condition name to color spec.
estimator : callable
Function to determine central tendency and to pass to bootstrap
must take an ``axis`` argument.
n_boot : int
Number of bootstrap iterations.
    err_palette : seaborn palette
Palette name or list of colors used when plotting data for each unit.
err_kws : dict, optional
        Keyword argument dictionary passed through to the matplotlib function
        generating the error plot.
ax : axis object, optional
Plot in given axis; if None creates a new figure
kwargs :
Other keyword arguments are passed to main plot() call
Returns
-------
ax : matplotlib axis
axis with plot data
"""
# Sort out default values for the parameters
if ax is None:
ax = plt.gca()
if err_kws is None:
err_kws = {}
# Handle different types of input data
if isinstance(data, pd.DataFrame):
xlabel = time
ylabel = value
# Condition is optional
if condition is None:
condition = pd.Series(np.ones(len(data)))
legend = False
legend_name = None
n_cond = 1
else:
legend = True and legend
legend_name = condition
n_cond = len(data[condition].unique())
else:
data = np.asarray(data)
# Data can be a timecourse from a single unit or
# several observations in one condition
if data.ndim == 1:
data = data[np.newaxis, :, np.newaxis]
elif data.ndim == 2:
data = data[:, :, np.newaxis]
n_unit, n_time, n_cond = data.shape
# Units are experimental observations. Maybe subjects, or neurons
if unit is None:
units = np.arange(n_unit)
unit = "unit"
units = np.repeat(units, n_time * n_cond)
ylabel = None
# Time forms the xaxis of the plot
if time is None:
times = np.arange(n_time)
else:
times = np.asarray(time)
xlabel = None
if hasattr(time, "name"):
xlabel = time.name
time = "time"
times = np.tile(np.repeat(times, n_cond), n_unit)
# Conditions split the timeseries plots
if condition is None:
conds = range(n_cond)
legend = False
if isinstance(color, dict):
err = "Must have condition names if using color dict."
raise ValueError(err)
else:
conds = np.asarray(condition)
legend = True and legend
if hasattr(condition, "name"):
legend_name = condition.name
else:
legend_name = None
condition = "cond"
conds = np.tile(conds, n_unit * n_time)
# Value forms the y value in the plot
if value is None:
ylabel = None
else:
ylabel = value
value = "value"
# Convert to long-form DataFrame
data = pd.DataFrame(dict(value=data.ravel(),
time=times,
unit=units,
cond=conds))
# Set up the err_style and ci arguments for the loop below
if isinstance(err_style, string_types):
err_style = [err_style]
elif err_style is None:
err_style = []
if not hasattr(ci, "__iter__"):
ci = [ci]
# Set up the color palette
if color is None:
current_palette = mpl.rcParams["axes.color_cycle"]
if len(current_palette) < n_cond:
colors = color_palette("husl", n_cond)
else:
colors = color_palette(n_colors=n_cond)
elif isinstance(color, dict):
colors = [color[c] for c in data[condition].unique()]
else:
try:
colors = color_palette(color, n_cond)
except ValueError:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color] * n_cond
# Do a groupby with condition and plot each trace
for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
df_c = df_c.pivot(unit, time, value)
x = df_c.columns.values.astype(np.float)
# Bootstrap the data for confidence intervals
boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
axis=0, func=estimator)
cis = [utils.ci(boot_data, v, axis=0) for v in ci]
central_data = estimator(df_c.values, axis=0)
# Get the color for this condition
color = colors[c]
# Use subroutines to plot the uncertainty
for style in err_style:
# Allow for null style (only plot central tendency)
if style is None:
continue
# Grab the function from the global environment
try:
plot_func = globals()["_plot_%s" % style]
except KeyError:
raise ValueError("%s is not a valid err_style" % style)
# Possibly set up to plot each observation in a different color
if err_palette is not None and "unit" in style:
orig_color = color
color = color_palette(err_palette, len(df_c.values))
# Pass all parameters to the error plotter as keyword args
plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
boot_data=boot_data,
central_data=central_data,
color=color, err_kws=err_kws)
# Plot the error representation, possibly for multiple cis
for ci_i in cis:
plot_kwargs["ci"] = ci_i
plot_func(**plot_kwargs)
if err_palette is not None and "unit" in style:
color = orig_color
# Plot the central trace
kwargs.setdefault("marker", "" if interpolate else "o")
ls = kwargs.pop("ls", "-" if interpolate else "")
kwargs.setdefault("linestyle", ls)
label = cond if legend else "_nolegend_"
ax.plot(x, central_data, color=color, label=label, **kwargs)
# Pad the sides of the plot only when not interpolating
ax.set_xlim(x.min(), x.max())
x_diff = x[1] - x[0]
if not interpolate:
ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
# Add the plot labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if legend:
ax.legend(loc=0, title=legend_name)
return ax
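# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example, kept as comments so that importing this module stays
# side-effect free.  The random array below is hypothetical data shaped
# (unit, time, condition), matching the ndarray form documented above; the
# long-form DataFrame path works the same way, with the ``time``, ``unit``,
# ``condition`` and ``value`` column names passed as strings.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     import seaborn as sns
#
#     rs = np.random.RandomState(0)
#     # 20 units, 31 timepoints, 3 conditions
#     data = rs.normal(size=(20, 31, 3)).cumsum(axis=1)
#     sns.tsplot(data, err_style="ci_band", ci=[68, 95])
#     plt.show()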
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
"""Plot translucent error bands around the central tendancy."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
err_kws.setdefault("alpha", 0.25)
err_kws.setdefault("linewidth", 0.25)
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
"""Plot the kernal density estimate of the bootstrap distribution."""
kwargs.pop("data")
_ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
"""Plot the kernal density estimate over the sample."""
_ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
"""Upsample over time and plot a KDE of the bootstrap distribution."""
kde_data = []
y_min, y_max = data.min(), data.max()
y_vals = np.linspace(y_min, y_max, 100)
upsampler = interpolate.interp1d(x, data)
data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
for pt_data in data_upsample.T:
pt_kde = stats.kde.gaussian_kde(pt_data)
kde_data.append(pt_kde(y_vals))
kde_data = np.transpose(kde_data)
rgb = mpl.colors.ColorConverter().to_rgb(color)
img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
img[:, :, :3] = rgb
kde_data /= kde_data.max(axis=0)
kde_data[kde_data > 1] = 1
img[:, :, 3] = kde_data
ax.imshow(img, interpolation="spline16", zorder=2,
extent=(x.min(), x.max(), y_min, y_max),
aspect="auto", origin="lower")
| bsd-3-clause |
zak-k/cis | cis/test/plot_tests/idiff.py | 3 | 2350 | #!/usr/bin/env python
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file was heavily influenced by a similar file in the iris package.
"""
Provides "diff-like" comparison of images.
Currently relies on matplotlib for image processing so limited to PNG format.
"""
from __future__ import (absolute_import, division, print_function)
import os.path
import shutil
import matplotlib.pyplot as plt
import matplotlib.image as mimg
import matplotlib.widgets as mwidget
def diff_viewer(expected_fname, result_fname, diff_fname):
plt.figure(figsize=(16, 16))
plt.suptitle(os.path.basename(expected_fname))
ax = plt.subplot(221)
ax.imshow(mimg.imread(expected_fname))
ax = plt.subplot(222, sharex=ax, sharey=ax)
ax.imshow(mimg.imread(result_fname))
ax = plt.subplot(223, sharex=ax, sharey=ax)
ax.imshow(mimg.imread(diff_fname))
def accept(event):
        # remove the expected result, copy the most recent result into its place
print('ACCEPTED NEW FILE: %s' % (os.path.basename(expected_fname), ))
os.remove(expected_fname)
shutil.copy2(result_fname, expected_fname)
os.remove(diff_fname)
plt.close()
def reject(event):
print('REJECTED: %s' % (os.path.basename(expected_fname), ))
plt.close()
ax_accept = plt.axes([0.6, 0.35, 0.1, 0.075])
ax_reject = plt.axes([0.71, 0.35, 0.1, 0.075])
bnext = mwidget.Button(ax_accept, 'Accept change')
bnext.on_clicked(accept)
bprev = mwidget.Button(ax_reject, 'Reject')
bprev.on_clicked(reject)
plt.show()
def step_over_diffs():
import cis.test.plot_tests
image_dir = os.path.join(os.path.dirname(cis.test.plot_tests.__file__),
'reference', 'visual_tests')
diff_dir = os.path.join(os.path.dirname(cis.test.plot_tests.__file__),
'result_image_comparison')
for expected_fname in sorted(os.listdir(image_dir)):
result_path = os.path.join(diff_dir, expected_fname)
diff_path = result_path[:-4] + '-failed-diff.png'
# if the test failed, there will be a diff file
if os.path.exists(diff_path):
expected_path = os.path.join(image_dir, expected_fname)
diff_viewer(expected_path, result_path, diff_path)
if __name__ == '__main__':
step_over_diffs()
| gpl-3.0 |
vermouthmjl/scikit-learn | sklearn/metrics/classification.py | 1 | 69294 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# Bernardo Stein <bernardovstein@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value on y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
           computational linguistics". Computational Linguistics 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
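# --- Illustrative sketch (not part of the original module) ------------------
# Hand-checked example of the computation above: for the two ratings below the
# confusion matrix is [[1, 1], [0, 2]], so p_o = 0.75, p_e = 0.5 and
# kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5.
def _demo_cohen_kappa():
    y1 = [0, 1, 1, 0]
    y2 = [0, 1, 1, 1]
    return cohen_kappa_score(y1, y2)  # == 0.5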
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
    sets, is used to compare the set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
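# --- Illustrative sketch (not part of the original module) ------------------
# Multilabel hand check of the docstring example: per sample the score is
# |intersection| / |union| of the label sets (1/2 and 2/2 here), and the
# normalized result is their mean, 0.75.
def _demo_jaccard_multilabel():
    import numpy as np
    y_true = np.array([[0, 1], [1, 1]])
    y_pred = np.ones((2, 2))
    per_sample = np.array([1.0 / 2.0, 2.0 / 2.0])
    assert np.isclose(jaccard_similarity_score(y_true, y_pred),
                      per_sample.mean())
    return per_sample.mean()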
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
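# --- Illustrative sketch (not part of the original module) ------------------
# The unweighted MCC computed above is simply the Pearson correlation between
# the encoded label vectors, so np.corrcoef returns the same value.
def _demo_mcc_is_pearson():
    import numpy as np
    y_true = [+1, +1, +1, -1]
    y_pred = [+1, -1, +1, +1]
    mcc = matthews_corrcoef(y_true, y_pred)
    pearson = np.corrcoef(np.array(y_true) > 0, np.array(y_pred) > 0)[0, 1]
    assert np.isclose(mcc, pearson)  # both are -0.33...
    return mcc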
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
    If normalize is ``True``, return the fraction of misclassifications
    (float); otherwise, return the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
    The relative contribution of precision and recall to the F1 score is
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
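# --- Illustrative sketch (not part of the original module) ------------------
# Per-class hand check of the harmonic-mean formula above: for class 0 in the
# docstring example, precision = 2/3 and recall = 1, so
# F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8, matching f1_score(..., average=None).
def _demo_f1_by_hand():
    import numpy as np
    y_true = [0, 1, 2, 0, 1, 2]
    y_pred = [0, 2, 1, 0, 0, 1]
    p = precision_score(y_true, y_pred, average=None)
    r = recall_score(y_true, y_pred, average=None)
    with np.errstate(divide='ignore', invalid='ignore'):
        f1_manual = np.where(p + r > 0, 2 * p * r / (p + r), 0.0)
    assert np.allclose(f1_manual, f1_score(y_true, y_pred, average=None))
    return f1_manual  # array([0.8, 0., 0.])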
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta: float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
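# --- Illustrative sketch (not part of the original module) ------------------
# Behaviour of the zero-division guard above: elements whose denominator is
# zero are forced to 0.0 and a single UndefinedMetricWarning is emitted.
def _demo_prf_divide():
    import numpy as np
    num = np.array([2.0, 1.0, 0.0])
    den = np.array([4.0, 0.0, 0.0])
    with np.errstate(divide='ignore', invalid='ignore'):
        result = _prf_divide(num, den, 'precision', 'predicted',
                             average=None, warn_for=('precision',))
    return result  # array([0.5, 0., 0.]), plus a warning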
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
           <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
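# --- Illustrative sketch (not part of the original module) ------------------
# For single-label multiclass data, micro-averaged precision, recall and
# F-score all collapse to plain accuracy, because every prediction contributes
# exactly one true positive, or one false positive plus one false negative.
def _demo_micro_average_is_accuracy():
    import numpy as np
    y_true = ['cat', 'dog', 'pig', 'cat', 'dog', 'pig']
    y_pred = ['cat', 'pig', 'dog', 'cat', 'cat', 'dog']
    p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 average='micro')
    acc = accuracy_score(y_true, y_pred)
    assert np.isclose(p, acc) and np.isclose(r, acc) and np.isclose(f, acc)
    return p  # 0.33...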
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
target_names = ['%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None, sample_weight=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred`` which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. Hamming loss is more forgiving in that it penalizes only the
    individual labels.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(classes) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
    >>> from sklearn.metrics import log_loss
    >>> log_loss(["spam", "ham", "ham", "spam"],  # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
y_pred = check_array(y_pred, ensure_2d=False)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
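# --- Illustrative sketch (not part of the original module) ------------------
# Hand check of the docstring example: the loss is the mean negative log of
# the probability assigned to each sample's true class (classes are sorted,
# so the probability columns are [ham, spam]).
def _demo_log_loss_by_hand():
    import numpy as np
    y_true = ["spam", "ham", "ham", "spam"]
    y_prob = [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]
    p_true_class = np.array([.9, .9, .8, .65])
    manual = np.mean(-np.log(p_true_class))
    assert np.isclose(log_loss(y_true, y_prob), manual)  # 0.21616...
    return manual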
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
    In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
    to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
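# --- Illustrative sketch (not part of the original module) ------------------
# Binary hand check with made-up decision values: the per-sample loss is
# max(0, 1 - y * decision) and the reported value is its (weighted) mean.
def _demo_hinge_loss_by_hand():
    import numpy as np
    y = np.array([-1, 1, 1])
    decision = np.array([-2.0, 2.5, 0.1])
    manual = np.maximum(0, 1 - y * decision).mean()  # == 0.3
    assert np.isclose(hinge_loss(y, decision), manual)
    return manual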
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
https://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
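# --- Illustrative sketch (not part of the original module) ------------------
# Hand check of the docstring example: the Brier score is the mean squared gap
# between the predicted probability of the positive class and the 0/1 outcome,
# here (0.1 ** 2 + 0.1 ** 2 + 0.2 ** 2 + 0.3 ** 2) / 4 = 0.0375.
def _demo_brier_by_hand():
    import numpy as np
    y_true = np.array([0, 1, 1, 0])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    manual = np.mean((y_true - y_prob) ** 2)
    assert np.isclose(brier_score_loss(y_true, y_prob), manual)
    return manual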
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/decomposition/plot_sparse_coding.py | 1 | 4054 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed so that both dictionaries stay on the same order of magnitude.
"""
print(__doc__)
import matplotlib.pylab as plt
import numpy as np
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))  # pi ** 0.25 = 4th root
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling  # integer number of dictionary atoms
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| mit |
ChrisBeaumont/brut | bubbly/hyperopt.py | 2 | 2563 | """
A simple interface for random exploration of hyperparameter space
"""
import random
import numpy as np
from scipy import stats
from sklearn.metrics import auc
from sklearn import metrics as met
class Choice(object):
"""Randomly select from a list"""
def __init__(self, *choices):
self._choices = choices
def rvs(self):
return random.choice(self._choices)
class Space(object):
"""
Spaces gather and randomly sample
collections of hyperparameters
Any class with an rvs method is a valid hyperparameter
(e.g., anything in scipy.stats is a hyperparameter)
"""
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
def __iter__(self):
while True:
yield {k: v.rvs() for k, v in self._hyperparams.items()}
def auc_below_fpos(y, yp, fpos):
"""
Variant on the area under the ROC curve score
Only integrate the portion of the curve
to the left of a threshold in fpos
"""
fp, tp, th = met.roc_curve(y, yp)
good = (fp <= fpos)
return auc(fp[good], tp[good])
def fmin(objective, space, threshold=np.inf):
"""
Generator that randomly samples a space,
and yields whenever a new minimum is encountered
Parameters
----------
    objective : A function which takes hyperparameters
        as input, and returns an objective function value and a classifier
        as output
space : the Space to sample
threshold : A threshold in the objective function values.
If provided, will not yield anything until
the objective function falls below threshold
Yields
------
Tuples of (objective function, parameter dict, classifier)
"""
best = threshold
try:
for p in space:
f, clf = objective(**p)
if f < best:
best = f
yield best, p, clf
except KeyboardInterrupt:
pass
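# Illustrative usage sketch (not part of the original module): draw random
# hyperparameter dicts from a Space and feed them to fmin. The objective below
# is a made-up stand-in for a real training/validation routine.
def _demo_random_search():
    toy_space = Space(learning_rate=stats.uniform(1e-3, 0.5),
                      n_estimators=Choice(50, 100))
    def objective(learning_rate, n_estimators):
        # pretend "loss": smaller rates and more trees look better
        return learning_rate / n_estimators, None
    # the first draw always beats the threshold, so next() terminates quickly
    best, params, clf = next(fmin(objective, toy_space, threshold=1.0))
    return best, params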
#default space for Gradient Boosted Decision trees
gb_space = Space(learning_rate = stats.uniform(1e-3, 1 - 1.01e-3),
n_estimators = Choice(50, 100, 200),
max_depth = Choice(1, 2, 3),
subsample = stats.uniform(1e-3, 1 - 1.01e-3))
#default space for WiseRF random forests
rf_space = Space(n_estimators = Choice(200, 400, 800, 1600),
min_samples_split = Choice(1, 2, 4),
criterion = Choice('gini', 'gainratio', 'infogain'),
max_features = Choice('auto'),
n_jobs = Choice(2))
| mit |
Scaravex/clue-hackathon | clustering/time_profile_cluster.py | 2 | 1438 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 19 11:21:47 2017
@author: mskara
"""
import pandas as pd
import matplotlib.pyplot as plt
from src.pre_process import load_binary
def create_profile_for_symptoms(df, date_range=15):
    profiles = {}
    # derive the symptom names from the data itself; the original script
    # assumed a module-level ``symptoms`` variable that is never defined here
    symptoms = df['symptom'].unique()
    for symptom in symptoms:
temp = df[df['symptom'] == symptom]
sympt_profile = temp.groupby(by=temp['day_in_cycle']).mean()[0:date_range]
plt.plot(sympt_profile)
profiles[symptom] = sympt_profile
return profiles
def check_probability_access(data):
'''find probability_access'''
df_active = data['active_days']
df_cycles = data['cycles']
access_prob = []
for i in range(1, 30):
access_prob.append((df_active['day_in_cycle'] == i).sum()
/(df_cycles['cycle_length'][df_cycles['cycle_length']>=i]).count())
# access_prob.plot(X)
return access_prob
df = pd.read_csv('result.txt')
# now is done until 15 day, afterwords our predictions are wrong
daily_profiles = create_profile_for_symptoms(df,date_range = 15)
data = load_binary()
access_profile = check_probability_access(data)
plt.plot (access_profile[0:29]) # probability of access
for symptom in daily_profiles:  # the symptoms profiled above
real_prob = daily_profiles[symptom].copy()
for i in range(15):
real_prob.loc[i]=real_prob.loc[i]/access_profile[i]
plt.plot(real_prob)
| apache-2.0 |
zuku1985/scikit-learn | sklearn/utils/tests/test_multiclass.py | 58 | 14316 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.metaestimators import _safe_split
from sklearn.model_selection import ShuffleSplit
from sklearn.svm import SVC
from sklearn import datasets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
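# Editorial note (illustration only): the EXAMPLES keys mirror the strings
# returned by type_of_target, e.g.
#     type_of_target([0, 1])                      -> 'binary'
#     type_of_target([1, 0, 2])                   -> 'multiclass'
#     type_of_target(np.array([[0, 1], [1, 0]]))  -> 'multilabel-indicator'
# while the NON_ARRAY_LIKE_EXAMPLES and MULTILABEL_SEQUENCES entries are
# expected to be rejected with a ValueError.  The tests below assert exactly
# this mapping for every example listed above.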
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
        msg_regex = r'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = datasets.load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = ShuffleSplit(test_size=0.25, random_state=0)
train, test = list(cv.split(X))[0]
X_train, y_train = _safe_split(clf, X, y, train)
K_train, y_train2 = _safe_split(clfp, K, y, train)
assert_array_almost_equal(K_train, np.dot(X_train, X_train.T))
assert_array_almost_equal(y_train, y_train2)
X_test, y_test = _safe_split(clf, X, y, test, train)
K_test, y_test2 = _safe_split(clfp, K, y, test, train)
assert_array_almost_equal(K_test, np.dot(X_test, X_train.T))
assert_array_almost_equal(y_test, y_test2)
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/compat/numpy/__init__.py | 3 | 2213 | """ support numpy compatibility across versions """
import re
import numpy as np
from distutils.version import LooseVersion
from pandas.compat import string_types, string_and_binary_types
# numpy versioning
_np_version = np.__version__
_nlv = LooseVersion(_np_version)
_np_version_under1p8 = _nlv < '1.8'
_np_version_under1p9 = _nlv < '1.9'
_np_version_under1p10 = _nlv < '1.10'
_np_version_under1p11 = _nlv < '1.11'
_np_version_under1p12 = _nlv < '1.12'
_np_version_under1p13 = _nlv < '1.13'
if _nlv < '1.7.0':
raise ImportError('this version of pandas is incompatible with '
'numpy < 1.7.0\n'
'your numpy version is {0}.\n'
'Please upgrade numpy to >= 1.7.0 to use '
'this pandas version'.format(_np_version))
_tz_regex = re.compile('[+-]0000$')
def tz_replacer(s):
if isinstance(s, string_types):
if s.endswith('Z'):
s = s[:-1]
elif _tz_regex.search(s):
s = s[:-5]
return s
def np_datetime64_compat(s, *args, **kwargs):
"""
    provide compat for constructing numpy datetime64 values from strings;
    the tz handling changes in numpy 1.11 make '2015-01-01 09:00:00Z' emit a
    deprecation warning, so the timezone suffix is stripped and
    '2015-01-01 09:00:00' is passed instead
"""
if not _np_version_under1p11:
s = tz_replacer(s)
return np.datetime64(s, *args, **kwargs)
def np_array_datetime64_compat(arr, *args, **kwargs):
"""
    provide compat for constructing an array of datetime64 values from
    strings, i.e. np.array(..., dtype=np.datetime64(..)); the tz handling
    changes in numpy 1.11 make '2015-01-01 09:00:00Z' emit a deprecation
    warning, so the timezone suffix is stripped and
    '2015-01-01 09:00:00' is passed instead
"""
if not _np_version_under1p11:
# is_list_like
if hasattr(arr, '__iter__') and not \
isinstance(arr, string_and_binary_types):
arr = [tz_replacer(s) for s in arr]
else:
arr = tz_replacer(arr)
return np.array(arr, *args, **kwargs)
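# Illustrative example (editorial note, not part of pandas): both helpers let
# call sites keep passing the old trailing-timezone forms; on numpy >= 1.11
# the suffix is stripped before the value reaches numpy, e.g.
#
#   np_datetime64_compat('2015-01-01 09:00:00Z')
#   np_array_datetime64_compat(['2015-01-01 09:00:00Z'], dtype='datetime64[ns]')
#
# behave like np.datetime64 / np.array called with '2015-01-01 09:00:00'.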
__all__ = ['np',
'_np_version_under1p8',
'_np_version_under1p9',
'_np_version_under1p10',
'_np_version_under1p11',
'_np_version_under1p12',
]
| agpl-3.0 |
dpinney/omf | omf/solvers/VB.py | 1 | 29740 | import pandas as pd
import pulp
import numpy as np
from numpy import *
import math  # math.exp is used below for the hourly self-discharge rate
class VirtualBattery(object):
""" Base class for abstraction. """
def __init__(self, ambient_temp, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number):
        # C: thermal capacitance
        # R: thermal resistance
        # P: rated power (kW) of each TCL
        # eta: coefficient of performance (COP)
        # delta: temperature deadband
        # theta_s: temperature setpoint
        # N: number of TCLs
        # ambient: ambient temperature
self.ambient = ambient_temp
self.C = capacitance
self.R = resistance
self.P = rated_power
self.eta = COP
self.delta = deadband
self.theta_s = setpoint
self.N = tcl_number
def generate(self, participation_number, P0_number):
""" Main calculation happens here. """
#heuristic function of participation
atan = np.arctan
participation = participation_number
P0 = P0_number
P0[P0 < 0] = 0.0 # set negative power consumption to 0
p_lower = self.N*participation*P0 # aggregated baseline power consumption considering participation
p_upper = self.N*participation*(self.P - P0)
p_upper[p_upper < 0] = 0.0 # set negative power upper bound to 0
e_ul = self.N*participation*self.C*self.delta/2/self.eta
return p_lower, p_upper, e_ul
class AC(VirtualBattery):
""" Derived Class for specifically AC Virtual Battery. """
def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number):
super(AC, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number)
# self.tcl_idx = tcl_idx
self.theta_a = self.ambient # theta_a == ambient temperature
def generate(self):
#heuristic function of participation
atan = np.arctan
# participation for AC
Ta = np.linspace(20, 45, num=51)
participation = (atan(self.theta_a-27) - atan(Ta[0]-27))/((atan(Ta[-1]-27) - atan(Ta[0]-27)))
participation = np.clip(participation, 0, 1)
#P0 for AC
P0 = (self.theta_a - self.theta_s)/self.R/self.eta # average baseline power consumption for the given temperature setpoint
return super(AC, self).generate(participation, P0)
class HP(VirtualBattery):
""" Derived Class for specifically HP Virtual Battery. """
def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number):
super(HP, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number)
# self.tcl_idx = tcl_idx
self.theta_a = self.ambient # theta_a == ambient temperature
def generate(self):
#heuristic function of participation
atan = np.arctan
# participation for HP
Ta = np.linspace(0, 25, num=51)
participation = 1-(atan(self.theta_a-10) - atan(Ta[0]-10))/((atan(Ta[-1]-10) - atan(Ta[0]-10)))
participation = np.clip(participation, 0, 1)
#P0 for HP
P0 = (self.theta_s - self.theta_a)/self.R/self.eta
return super(HP, self).generate(participation, P0)
class RG(VirtualBattery):
""" Derived Class for specifically RG Virtual Battery. """
def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number):
super(RG, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number)
# self.tcl_idx = tcl_idx
self.theta_a = self.ambient # theta_a == ambient temperature
def generate(self):
#heuristic function of participation
atan = np.arctan
# participation for RG
participation = np.ones(self.theta_a.shape)
participation = np.clip(participation, 0, 1)
#P0 for RG
P0 = (self.theta_a - self.theta_s)/self.R/self.eta # average baseline power consumption for the given temperature setpoint
return super(RG, self).generate(participation, P0)
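# Illustrative usage (editorial sketch, not part of the original solver): an
# aggregate air-conditioner battery for 1,000 units driven by an hourly
# ambient-temperature array in degrees C.  Every numeric value below is a
# placeholder chosen for the example, not a recommended default.
#
#   temps = 30.0 * np.ones(8760)                     # hourly ambient profile
#   ac = AC(temps, capacitance=2.0, resistance=2.0, rated_power=5.6,
#           COP=2.5, deadband=0.625, setpoint=22.5, tcl_number=1000)
#   p_lower, p_upper, e_ul = ac.generate()
#
# p_lower / p_upper bound the aggregate charging power (kW) of the fleet and
# e_ul is its upper energy limit (kWh) at each time step, i.e. the kind of
# power/energy envelope that the optimization routines further below expect
# in their Input table.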
class WH(VirtualBattery):
""" Derived class for specifically Water Heater Virtual Battery. """
N_wh = 50
def __init__(self, theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number,Tout, water):
super(WH, self).__init__(theta_a, capacitance, resistance, rated_power, COP, deadband, setpoint, tcl_number)
self.C_wh = self.C*np.ones((self.N_wh, 1)) # thermal capacitance, set in parent class
self.R_wh = self.R*np.ones((self.N_wh, 1)) # thermal resistance
self.P_wh = self.P*np.ones((self.N_wh, 1)) # rated power (kW) of each TCL
self.delta_wh = self.delta*np.ones((self.N_wh, 1)) # temperature deadband
self.theta_s_wh = self.theta_s*np.ones((self.N_wh, 1)) # temperature setpoint
self.Tout=Tout
self.water = water
# self.N = self.para[6] # number of TCL
    def calculate_twat(self, tout_avg, tout_madif):
        """Estimate the inlet (mains) water temperature for every minute of
        the year from the annual average outdoor temperature and the maximum
        monthly temperature difference; the correlation works in degF, so the
        inputs are converted and the result is converted back to degC."""
        tout_avg = tout_avg / 5 * 9 + 32
        tout_madif = tout_madif / 5 * 9
ratio = 0.4 + 0.01 * (tout_avg - 44)
lag = 35 - 1.0 * (tout_avg - 44)
        twat = np.ones((365 * 24 * 60, 1))
        for i in range(365):
            for j in range(60 * 24):
                twat[i * 24 * 60 + j] = (tout_avg + 6) + ratio * (tout_madif / 2) * sin((0.986 * (i - 15 - lag) - 90) / 180 * 3.14)
        twat = (twat - 32.) / 9. * 5.
return twat
def prepare_pare_for_calculate_twat(self,tou_raw):
tout_avg = sum(tou_raw)/len(tou_raw)
mon=[31,28,31,30,31,30,31,31,30,31,30,31]
mon_ave=1*np.ones((12,1))
mon_ave[1]=sum(tou_raw[0:mon[1]*24])/mon[1]/24
stop=mon[1]*24
for idx in range(1,len(mon)):
mon_ave[idx]=sum(tou_raw[stop:stop+mon[idx]*24])/mon[idx]/24;
tou_madif=max(mon_ave)- min(mon_ave)
return tout_avg, tou_madif
def generate(self):
# theta_a is the ambient temperature
# theta_a = (72-32)*5.0/9*np.ones((365, 24*60)) # This is a hard-coded 72degF, converted to degCel
theta_a = self.ambient#*np.ones((365, 24*60)) # theta_a == ambient temperature
#nRow, nCol = theta_a.shape
nRow, nCol = 365, 24*60
theta_a = np.reshape(theta_a, [nRow*nCol, 1])
        # drive the model with the average of the outdoor (Tout) and ambient series
        for i in range(len(self.Tout)):
            theta_a[i] = (self.Tout[i] + self.ambient[i]) / 2
# h is the model time discretization step in seconds
h = 60
        #T is the number of time steps considered, i.e., T = 365*24*60 means a year
# with 1 minute time discretization
T = len(theta_a)
tou_avg,maxdiff=self.prepare_pare_for_calculate_twat(self.Tout)
twat=self.calculate_twat(tou_avg,maxdiff);
# print twat
# theta_lower is the temperature lower bound
theta_lower_wh = self.theta_s_wh - self.delta_wh/2.0
# theta_upper is the temperature upper bound
theta_upper_wh = self.theta_s_wh + self.delta_wh/2.0
# m_water is the water draw in unit of gallon per minute
m_water = self.water#np.genfromtxt("Flow_raw_1minute_BPA.csv", delimiter=',')[1:, 1:]
where_are_NaNs = isnan(m_water)
m_water[where_are_NaNs] = 0
m_water = m_water *0.00378541178*1000/h
m_water_row, m_water_col = m_water.shape
water_draw = np.zeros((m_water_row, int(self.N_wh)))
for i in range(int(self.N_wh)):
k = np.random.randint(m_water_col)
water_draw[:, i] = np.roll(m_water[:, k], (1, np.random.randint(-14, 1))) + m_water[:, k] * 0.1 * (np.random.random() - 0.5)
# k = m_water_col - 1
# print(k)
# raise(ArgumentError, "Stop here")
# water_draw[:, i] = m_water[:, k]
first = -(
np.matmul(theta_a, np.ones((1, self.N_wh)))
- np.matmul(np.ones((T, 1)), self.theta_s_wh.transpose())
)
# print(np.argwhere(np.isnan(first)))
second = np.matmul(np.ones((T, 1)), self.R_wh.transpose())
# print(np.argwhere(np.isnan(second)))
Po = (
first
/ second
- 4.2
* np.multiply(water_draw, (55-32) * 5/9.0 - np.matmul(np.ones((T, 1)), self.theta_s_wh.transpose()))
)
# print(water_draw.shape)
# print(len(water_draw[:1]))
# Po_total is the analytically predicted aggregate baseline power
Po_total = np.sum(Po, axis=1)
upper_limit = np.sum(self.P_wh, axis=0)
# print(np.argwhere(np.isnan(water_draw)))
Po_total[Po_total > upper_limit[0]] = upper_limit
# theta is the temperature of TCLs
theta = np.zeros((self.N_wh, T))
theta[:, 0] = self.theta_s_wh.reshape(-1)
# m is the indicator of on-off state: 1 is on, 0 is off
m = np.ones((self.N_wh, T))
m[:int(self.N_wh*0.8), 0] = 0
for t in range(T - 1):
theta[:, t+1] = (
(1 - h/(self.C_wh * 3600) / self.R_wh).reshape(-1)
* theta[:, t]
+ (h / (self.C_wh * 3600) / self.R_wh).reshape(-1)
* theta_a[t]
+ ((h/(self.C_wh * 3600))*self.P_wh).reshape(-1)*m[:, t]
)
m[theta[:, t+1] > (theta_upper_wh).reshape(-1), t+1] = 0
m[theta[:, t+1] < (theta_lower_wh).reshape(-1), t+1] = 1
m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t+1] = m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t]
theta[:, 0] = theta[:, -1]
m[:, 0] = m[:, -1]
# Po_total_sim is the predicted aggregate baseline power using simulations
Po_total_sim = np.zeros((T, 1))
Po_total_sim[0] = np.sum(m[:, 0]*(self.P_wh.reshape(-1)))
for t in range(T - 1):
# print t
theta[:, t+1] = (1 - h/(self.C_wh * 3600)/self.R_wh).reshape(-1) * theta[:, t] + (h/(self.C_wh * 3600)/self.R_wh).reshape(-1)*theta_a[t] + (h/(self.C_wh*3600)).reshape(-1)*m[:, t]*self.P_wh.reshape(-1) + h*4.2*water_draw[t, :].transpose() * (twat[t] -theta[:, t]) / ((self.C_wh*3600).reshape(-1))
m[theta[:, t+1] > (theta_upper_wh).reshape(-1), t+1] = 0
m[theta[:, t+1] < (theta_lower_wh).reshape(-1), t+1] = 1
m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t+1] = m[(theta[:, t+1] >= (theta_lower_wh).reshape(-1)) & (theta[:, t+1] <= (theta_upper_wh).reshape(-1)), t]
Po_total_sim[t+1] = np.sum(m[:, t+1] * self.P_wh.reshape(-1))
index_available = np.ones((self.N_wh, T))
for t in range(T - 1):
index_available[(theta[:, t] < (theta_lower_wh-0.5).reshape(-1)) | (theta[:, t] > (theta_upper_wh+0.5).reshape(-1)), t] = 0
# Virtual battery parameters
p_upper_wh1 = np.sum(self.P_wh) - Po_total_sim
p_lower_wh1 = Po_total_sim
e_ul_wh1 = np.sum((np.matmul(self.C_wh, np.ones((1, T))) * np.matmul(self.delta_wh, np.ones((1, T))) / 2 * index_available).transpose(), axis=1)
# calculate hourly average data from minute output for power
p_upper_wh1 = np.reshape(p_upper_wh1, [8760,60])
p_upper_wh = np.mean(p_upper_wh1, axis=1)*float(self.N)/float(self.N_wh)
p_lower_wh1 = np.reshape(p_lower_wh1, [8760,60])
p_lower_wh = np.mean(p_lower_wh1, axis=1)*float(self.N)/float(self.N_wh)
# extract hourly data from minute output for energy
e_ul_wh = e_ul_wh1[59:len(e_ul_wh1):60]*float(self.N)/float(self.N_wh)
return p_lower_wh, p_upper_wh, e_ul_wh
# ------------------------STACKED CODE FROM PNNL----------------------------- #
def run_fhec(ind, gt_demand, Input):
use_hour = int(ind["userHourLimit"]) # number of VB use hours specified by the user
epsilon = 1 #float(ind["energyReserve"]) # energy reserve parameter, range: 0 - 1
fhec_kwh_rate = float(ind["electricityCost"]) # $/kW
fhec_peak_mult = float(ind["peakMultiplier"])
s = sorted(gt_demand)
# peak hours calculation
perc = float(ind["peakPercentile"])
fhec_gt98 = s[int(perc*len(s))]
fhec_peak_hours = []
for idx, val in enumerate(gt_demand):
if val > fhec_gt98:
fhec_peak_hours.extend([idx+1])
fhec_off_peak_hours = []
for i in range(len(gt_demand)):
if i not in fhec_peak_hours:
fhec_off_peak_hours.extend([i+1])
# read the input data, including load profile, VB profile, and regulation price
# Input = pd.read_csv(input_csv, index_col=['Hour'])
# VB model parameters
C = float(ind["capacitance"]) # thermal capacitance
R = float(ind["resistance"]) # thermal resistance
deltaT = 1
alpha = math.exp(-deltaT/(C*R)) # hourly self discharge rate
E_0 = 0 # VB initial energy state
arbitrage_option = ind["use_arbitrage"] == "on"
regulation_option = ind["use_regulation"] == "on"
deferral_option = ind["use_deferral"] == "on"
# calculate the predicted profits for all 8760 hours
use_prft = []
for hour in Input.index:
temp = 0
if arbitrage_option or deferral_option:
if hour in fhec_peak_hours:
temp += fhec_peak_mult*fhec_kwh_rate*(Input.loc[hour, "VB Energy upper (kWh)"]-Input.loc[hour, "VB Energy lower (kWh)"])
if hour in fhec_off_peak_hours:
temp += fhec_kwh_rate*(Input.loc[hour, "VB Energy upper (kWh)"]-Input.loc[hour, "VB Energy lower (kWh)"])
if regulation_option:
temp += (Input.loc[hour, "Reg-up Price ($/MW)"]+Input.loc[hour, "Reg-dn Price ($/MW)"])/1000*(Input.loc[hour, "VB Energy upper (kWh)"]-Input.loc[hour, "VB Energy lower (kWh)"])
use_prft.append({'Hour': hour, 'Profit': temp})
# sort the predicted profits from the highest to the lowest
use_prft = sorted(use_prft, reverse = True, key = lambda i : i['Profit'])
# get the indices of the first use_hour hours, and the optimization will be scheduled only for those hours
use_list = []
for index in range(use_hour):
use_list.append(use_prft[index]['Hour'])
###############################################################################
# start demand charge reduction LP problem
model = pulp.LpProblem("Demand charge minimization problem FHEC-Knievel", pulp.LpMinimize)
# decision variable of VB charging power; dim: 8760 by 1
VBpower = pulp.LpVariable.dicts("ChargingPower", ((hour) for hour in Input.index))
# set bound
for hour in Input.index:
if hour in use_list:
VBpower[hour].lowBound = Input.loc[hour, "VB Power lower (kW)"]
VBpower[hour].upBound = Input.loc[hour, "VB Power upper (kW)"]
if hour not in use_list:
VBpower[hour].lowBound = 0
VBpower[hour].upBound = 0
# decision variable of VB energy state; dim: 8760 by 1
VBenergy = pulp.LpVariable.dicts("EnergyState", ((hour) for hour in Input.index))
# set bound
for hour in Input.index:
VBenergy[hour].lowBound = Input.loc[hour, "VB Energy lower (kWh)"]
VBenergy[hour].upBound = Input.loc[hour, "VB Energy upper (kWh)"]
# decision variable of annual peak demand
PeakDemand = pulp.LpVariable("annual peak demand", lowBound=0)
# decision variable: hourly regulation up capacity; dim: 8760 by 1
reg_up = pulp.LpVariable.dicts("hour reg up", ((hour) for hour in Input.index), lowBound=0)
# decision variable: hourly regulation dn capacity; dim: 8760 by 1
reg_dn = pulp.LpVariable.dicts("hour reg dn", ((hour) for hour in Input.index), lowBound=0)
for hour in Input.index:
if hour not in use_list:
reg_up[hour].upBound = 0
reg_dn[hour].upBound = 0
# objective functions
if (arbitrage_option == False and regulation_option == False and deferral_option == False):
model += 0, "an arbitrary objective function"
if (arbitrage_option == True and regulation_option == False and deferral_option == False):
model += pulp.lpSum([fhec_peak_mult*fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_peak_hours]
+ [fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_off_peak_hours])
if (arbitrage_option == False and regulation_option == True and deferral_option == False):
model += pulp.lpSum([-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index])
if (arbitrage_option == False and regulation_option == False and deferral_option == True):
model += pulp.lpSum(1E03*PeakDemand)
if (arbitrage_option == True and regulation_option == True and deferral_option == False):
model += pulp.lpSum([fhec_peak_mult*fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_peak_hours]
+ [fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_off_peak_hours]
+ [-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index])
if (arbitrage_option == True and regulation_option == False and deferral_option == True):
model += pulp.lpSum([fhec_peak_mult*fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_peak_hours]
+ [fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_off_peak_hours]
+ 1E03*PeakDemand)
if (arbitrage_option == False and regulation_option == True and deferral_option == True):
model += pulp.lpSum([-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index]
+ 1E03*PeakDemand)
if (arbitrage_option == True and regulation_option == True and deferral_option == True):
model += pulp.lpSum([fhec_peak_mult*fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_peak_hours]
+ [fhec_kwh_rate*(Input.loc[hour, "Load (kW)"]+VBpower[hour]) for hour in fhec_off_peak_hours]
+ [-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index]
+ 1E03*PeakDemand)
# VB energy state as a function of VB power
for hour in Input.index:
if hour==1:
model += VBenergy[hour] == alpha*E_0 + VBpower[hour]*deltaT
else:
model += VBenergy[hour] == alpha*VBenergy[hour-1] + VBpower[hour]*deltaT
# hourly regulation constraints
for hour in Input.index:
if regulation_option:
model += reg_up[hour] == reg_dn[hour] # regulation balance
model += VBenergy[hour] - epsilon*reg_up[hour]*deltaT >= VBenergy[hour].lowBound
model += VBenergy[hour] + epsilon*reg_dn[hour]*deltaT <= VBenergy[hour].upBound
else:
model += reg_up[hour] == 0
model += reg_dn[hour] == 0
# extra constraints
for hour in Input.index:
model += PeakDemand >= Input.loc[hour, "Load (kW)"] + VBpower[hour]
model.solve()
###############################################################################
use_hour_indicator = []
for hour in Input.index:
if VBpower[hour].varValue != 0 or reg_up[hour].varValue != 0:
use_hour_indicator.append({'Hour': hour, 'Use': 1})
else:
use_hour_indicator.append({'Hour': hour, 'Use': 0})
output = []
for hour in Input.index:
var_output = {
'Hour': hour,
'VB energy (kWh)': int(100*VBenergy[hour].varValue)/100,
'VB power (kW)': int(100*VBpower[hour].varValue)/100,
'Load (kW)': int(100*Input.loc[hour, "Load (kW)"])/100,
'Net load (kW)': int(100*(VBpower[hour].varValue+Input.loc[hour, "Load (kW)"]))/100,
'Hour used': use_hour_indicator[hour-1]['Use']
}
if regulation_option:
var_regulation = {'Regulation (kW)': int(100*reg_up[hour].varValue)/100}
var_output.update(var_regulation)
output.append(var_output)
output_df = pd.DataFrame.from_records(output)
# output_df.to_csv('fhec_output.csv', index=False)
return output_df
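# Editorial note on the expected inputs (inferred from the two routines above
# and below, not from separate documentation): `Input` is a DataFrame indexed
# by 'Hour' (1..8760) with at least the columns
#     'Load (kW)', 'VB Power lower (kW)', 'VB Power upper (kW)',
#     'VB Energy lower (kWh)', 'VB Energy upper (kWh)',
#     'Reg-up Price ($/MW)', 'Reg-dn Price ($/MW)'
# and `ind` is a dict of model settings: both routines read 'userHourLimit',
# 'capacitance', 'resistance', 'use_arbitrage', 'use_regulation' and
# 'use_deferral'; run_fhec additionally reads 'electricityCost',
# 'peakMultiplier' and 'peakPercentile' and takes gt_demand, an iterable of
# hourly demand values used to pick the peak hours, while run_okec reads
# 'annual_peak_charge', 'avg_demand_charge' and 'fuel_charge'.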
def run_okec(ind, Input):
# Input.to_csv('okec_input.csv', index=False)
use_hour = int(ind["userHourLimit"]) # number of VB use hours specified by the user
epsilon = 1 #float(ind["energyReserve"]) # energy reserve parameter, range: 0 - 1
okec_peak_charge = float(ind["annual_peak_charge"]) # annual peak demand charge $100/kW
okec_avg_demand_charge = float(ind["avg_demand_charge"]) # $120/kW
okec_fuel_charge = float(ind["fuel_charge"]) # total energy $/kWh
# VB model parameters
C = float(ind["capacitance"]) # thermal capacitance
R = float(ind["resistance"]) # thermal resistance
deltaT = 1
alpha = math.exp(-deltaT/(C*R)) # hourly self discharge rate
E_0 = 0 # VB initial energy state
arbitrage_option = ind["use_arbitrage"] == "on"
regulation_option = ind["use_regulation"] == "on"
deferral_option = ind["use_deferral"] == "on"
# calculate the predicted profits for all 8760 hours
use_prft = []
for hour in Input.index:
temp = 0
if arbitrage_option or deferral_option:
temp += okec_avg_demand_charge/len(Input.index)*(Input.loc[hour, "VB Energy upper (kWh)"]-Input.loc[hour, "VB Energy lower (kWh)"])
if regulation_option:
temp += (Input.loc[hour, "Reg-up Price ($/MW)"]+Input.loc[hour, "Reg-dn Price ($/MW)"])/1000*(Input.loc[hour, "VB Energy upper (kWh)"]-Input.loc[hour, "VB Energy lower (kWh)"])
use_prft.append({'Hour': hour, 'Profit': temp})
# sort the predicted profits from the highest to the lowest
use_prft = sorted(use_prft, reverse = True, key = lambda i : i['Profit'])
# get the indices of the first use_hour hours, and the optimization will be scheduled only for those hours
use_list = []
for index in range(use_hour):
use_list.append(use_prft[index]['Hour'])
# start demand charge reduction LP problem
model = pulp.LpProblem("Demand charge minimization problem OKEC-Buffett", pulp.LpMinimize)
# decision variable of VB charging power; dim: 8760 by 1
VBpower = pulp.LpVariable.dicts("ChargingPower", ((hour) for hour in Input.index))
# set bound
for hour in Input.index:
if hour in use_list:
VBpower[hour].lowBound = Input.loc[hour, "VB Power lower (kW)"]
VBpower[hour].upBound = Input.loc[hour, "VB Power upper (kW)"]
if hour not in use_list:
VBpower[hour].lowBound = 0
VBpower[hour].upBound = 0
# decision variable of VB energy state; dim: 8760 by 1
VBenergy = pulp.LpVariable.dicts("EnergyState", ((hour) for hour in Input.index))
# set bound
for hour in Input.index:
VBenergy[hour].lowBound = Input.loc[hour, "VB Energy lower (kWh)"]
VBenergy[hour].upBound = Input.loc[hour, "VB Energy upper (kWh)"]
# decision variable of annual peak demand
PeakDemand = pulp.LpVariable("annual peak demand", lowBound=0)
# decision variable: hourly regulation up capacity; dim: 8760 by 1
reg_up = pulp.LpVariable.dicts("hour reg up", ((hour) for hour in Input.index), lowBound=0)
# decision variable: hourly regulation dn capacity; dim: 8760 by 1
reg_dn = pulp.LpVariable.dicts("hour reg dn", ((hour) for hour in Input.index), lowBound=0)
for hour in Input.index:
if hour not in use_list:
reg_up[hour].upBound = 0
reg_dn[hour].upBound = 0
# objective function: sum of monthly demand charge
if (arbitrage_option == False and regulation_option == False and deferral_option == False):
model += 0, "an arbitrary objective function"
if (arbitrage_option == True and regulation_option == False and deferral_option == False):
model += pulp.lpSum(okec_peak_charge*PeakDemand
+ [okec_avg_demand_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])/len(Input.index) for hour in Input.index]
+ [okec_fuel_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])*deltaT for hour in Input.index])
if (arbitrage_option == False and regulation_option == True and deferral_option == False):
model += pulp.lpSum([-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index])
if (arbitrage_option == False and regulation_option == False and deferral_option == True):
model += pulp.lpSum(1E03*PeakDemand)
if (arbitrage_option == True and regulation_option == True and deferral_option == False):
model += pulp.lpSum(okec_peak_charge*PeakDemand
+ [okec_avg_demand_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])/len(Input.index) for hour in Input.index]
+ [okec_fuel_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])*deltaT for hour in Input.index]
+ [-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index])
if (arbitrage_option == True and regulation_option == False and deferral_option == True):
model += pulp.lpSum(okec_peak_charge*PeakDemand
+ [okec_avg_demand_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])/len(Input.index) for hour in Input.index]
+ [okec_fuel_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])*deltaT for hour in Input.index]
+ 1E03*PeakDemand)
if (arbitrage_option == False and regulation_option == True and deferral_option == True):
model += pulp.lpSum([-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index]
+ 1E03*PeakDemand)
if (arbitrage_option == True and regulation_option == True and deferral_option == True):
model += pulp.lpSum(okec_peak_charge*PeakDemand
+ [okec_avg_demand_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])/len(Input.index) for hour in Input.index]
+ [okec_fuel_charge*(Input.loc[hour, "Load (kW)"]+VBpower[hour])*deltaT for hour in Input.index]
+ [-Input.loc[hour, "Reg-up Price ($/MW)"]/1000*reg_up[hour] for hour in Input.index]
+ [-Input.loc[hour, "Reg-dn Price ($/MW)"]/1000*reg_dn[hour] for hour in Input.index]
+ 1E03*PeakDemand)
# VB energy state as a function of VB power
for hour in Input.index:
if hour==1:
model += VBenergy[hour] == alpha*E_0 + VBpower[hour]*deltaT
else:
model += VBenergy[hour] == alpha*VBenergy[hour-1] + VBpower[hour]*deltaT
# hourly regulation constraints
for hour in Input.index:
if regulation_option:
model += reg_up[hour] == reg_dn[hour] # regulation balance
model += VBenergy[hour] - epsilon*reg_up[hour]*deltaT >= VBenergy[hour].lowBound
model += VBenergy[hour] + epsilon*reg_dn[hour]*deltaT <= VBenergy[hour].upBound
else:
model += reg_up[hour] == 0
model += reg_dn[hour] == 0
# extra constraints
for hour in Input.index:
model += PeakDemand >= Input.loc[hour, "Load (kW)"] + VBpower[hour]
model.solve()
###############################################################################
use_hour_indicator = []
for hour in Input.index:
if VBpower[hour].varValue != 0 or reg_up[hour].varValue != 0:
use_hour_indicator.append({'Hour': hour, 'Use': 1})
else:
use_hour_indicator.append({'Hour': hour, 'Use': 0})
output = []
for hour in Input.index:
var_output = {
'Hour': hour,
'VB energy (kWh)': int(100*VBenergy[hour].varValue)/100,
'VB power (kW)': int(100*VBpower[hour].varValue)/100,
'Load (kW)': int(100*Input.loc[hour, "Load (kW)"])/100,
'Net load (kW)': int(100*(VBpower[hour].varValue+Input.loc[hour, "Load (kW)"]))/100,
'Hour used': use_hour_indicator[hour-1]['Use']
}
if regulation_option:
var_regulation = {'Regulation (kW)': int(100*reg_up[hour].varValue)/100}
var_output.update(var_regulation)
output.append(var_output)
output_df = pd.DataFrame.from_records(output)
return output_df
| gpl-2.0 |
etkirsch/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
    also known as the Forest Small Rice Rat, a rodent that lives in
    Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
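# Example usage (illustrative note, not part of the module): the first call
# downloads the samples and coverages archives into data_home and caches a
# joblib pickle; later calls load the cached Bunch directly.
#
#   data = fetch_species_distributions()
#   xgrid, ygrid = construct_grids(data)
#   data.coverages.shape   # (14, 1592, 1212), -9999 marks missing values
#   data.train.shape       # (1623,) training presence records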
| bsd-3-clause |
ominux/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 1 | 4105 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters
gets closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print __doc__
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import numpy as np
import pylab as pl
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=10, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
    When fixed_n_classes is not None the first labeling is considered a ground
    truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
pl.figure(1)
plots = []
names = []
for score_func in score_funcs:
print "Computing %s for %d values of n_clusters and n_samples=%d" % (
score_func.__name__, len(n_clusters_range), n_samples)
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
plots.append(pl.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1)))
names.append(score_func.__name__)
pl.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
pl.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
pl.ylabel('Score value')
pl.legend(plots, names)
pl.ylim(ymin=-0.05, ymax=1.05)
pl.show()
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
pl.figure(2)
plots = []
names = []
for score_func in score_funcs:
print "Computing %s for %d values of n_clusters and n_samples=%d" % (
score_func.__name__, len(n_clusters_range), n_samples)
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
plots.append(pl.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1)))
names.append(score_func.__name__)
pl.title("Clustering measures for random uniform labeling\n"
"against reference assignement with %d classes" % n_classes)
pl.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
pl.ylabel('Score value')
pl.ylim(ymin=-0.05, ymax=1.05)
pl.legend(plots, names)
pl.show()
| bsd-3-clause |
percyfal/snakemakelib-core | snakemakelib/plot/bokeh/color.py | 1 | 1126 | # Copyright (C) 2015 by Per Unneberg
import math
import pandas.core.common as com
from bokeh.palettes import brewer as bokeh_brewer
from .palettes import brewer as snakemakelib_brewer
import logging
logger = logging.getLogger(__name__)
MINSIZE = 3
MAXSIZE = 9 # FIXME: some palettes have 9 as max, some 11
brewer = bokeh_brewer
brewer.update(snakemakelib_brewer)
def colorbrewer(size=MINSIZE, palette="Paired", datalen=None):
"""Generate a color palette following colorbrewer.
Args:
size (int): size of desired palette
palette (str): name of palette
datalen (int): length of data vector. If None, the palette size
will equal size, else the colors will be reused to fill
up a vector of length datalen
Returns:
palette (list): list of colors
"""
size = max(MINSIZE, min(size, MAXSIZE))
    # guard against the default datalen=None, which cannot be compared to ints
    if datalen is not None and MINSIZE <= datalen <= MAXSIZE:
        size = datalen
    colors = brewer[palette][size]
    if datalen is not None and datalen > size:
        # recycle the palette so that every data point gets a colour
        colors = colors * math.ceil(datalen / size)
return colors[0:datalen]
else:
return colors
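if __name__ == "__main__":
    # Minimal usage sketch (editorial illustration, not part of the module):
    # ask for a 5-colour "Paired" palette but spread it over 12 data points;
    # the palette is recycled so every point gets a colour.
    print(colorbrewer(size=5, palette="Paired", datalen=12))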
| mit |
eegroopm/pyLATTICE | gui/pyLATTICE.py | 1 | 74321 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pyLATTICE is...
"""
from __future__ import division #necessary for python2
from __future__ import unicode_literals
# define authorship information
__authors__ = ['Evan Groopman', 'Thomas Bernatowicz']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011-2014'
__license__ = 'GPL'
# maintenance information
__maintainer__ = 'Evan Groopman'
__email__ = 'eegroopm@gmail.com'
"""
Created on Wed Apr 11 14:46:56 2012
@author: Evan Groopman
"""
#Main imports
from PyQt4 import QtCore, QtGui, uic
import os, sys
import numpy as np
import pandas as pd
import re
#Matplotlib imports
import matplotlib as mpl
mpl.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
# Local files in the resource directory
import gui
from resources.TableWidget import TableWidget
from resources.Diffraction import Diffraction
#from resources.pyqtresizer import logit,slResizer,Resizer
from resources.IPythonConsole import IPythonConsole
from resources.common import common
from resources.matplotlibwidget import matplotlibWidget
from resources.Dialogs import (MineralListDialog, NewMineralDialog,
ManualConditionsDialog, SettingsDialog)
try:
from resources.dspace import DSpace
print('Importing compiled "DSpace"')
except ImportError as error:
# Attempt autocompilation.
import pyximport
pyximport.install()
from resources._dspace import DSpace
print('Building "DSpace"')
try:
from resources.diffspot import CalcSpots, CalcSpotsHCP
print('Importing compiled "DiffSpot"')
except ImportError as error:
# Attempt autocompilation.
import pyximport
pyximport.install()
from resources._diffspot import CalcSpots, CalcSpotsHCP
print('Building "DiffSpot"')
#need different compiled versions of Cython modules depending on python version
#if sys.version_info[0] == 3:
# #from resources.dspace import DSpace#Cython function for calculating d-spaces
# #from resources.diffspot import CalcSpots, CalcSpotsHCP#Cython function for calculating diffraction spot coordinates
# from resources.pyqtresizer import logit,slResizer,Resizer
#elif sys.version_info[0] == 2:
# #from resources.dspace_py2 import DSpace#Cython function for calculating d-spaces
# #from resources.diffspot_py2 import CalcSpots, CalcSpotsHCP#Cython function for calculating diffraction spot coordinates
# from resources.pyqtresizer_py2 import logit,slResizer,Resizer
#from Wulff_net import WULFF
#dealing with unicode characters in windows, which breaks compiled linux rendering
if sys.platform == 'win32':
mpl.rc('font', **{'sans-serif' : 'Arial Unicode MS','family' : 'sans-serif'})
#elif sys.platform == 'linux':# and os.path.isfile('pyLATTICE'): #on linux AND executable file exists. Does nothing if running from source
# print('Adjusting font')
# mpl.rc('font',**{'sans-serif' : 'Bitstream Vera Sans','family' : 'sans-serif'})
#use LaTeX to render symbols
#plt.rc('text', usetex=True)
mpl.rcParams['mathtext.default'] = 'regular'
#mpl.rcParams['text.latex.preamble'] = [r'\usepackage{textcomp}']
#mpl.rcParams['text.latex.unicode'] = True
#Other
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
################################################################################
## Gui file ##
# Set up window
class pyLATTICE_GUI(QtGui.QMainWindow):
def __init__(self, parent=None):
super(pyLATTICE_GUI, self).__init__(parent)
# load the ui
gui.loadUi(__file__,self)
self.version = gui.__version__
self._grabCommon()
self.Diffraction = Diffraction()
self.DiffWidget = matplotlibWidget(self.common,self.Diffraction) #mplwidget can access common data
#self.DiffWidget.setStyleSheet("font-family: 'Arial Unicode MS', Arial, sans-serif; font-size: 15px;")
self.verticalLayout.addWidget(self.DiffWidget)
self.DiffWidget.distances.connect(self.on_distances_sent)
#self.DiffWidget = self.MplWidget
self.Plot = self.DiffWidget.canvas.ax
self.Plot.axis('equal') #locks aspect ratio 1:1, even when zooming
#matplotlibWidget.setupToolbar(self.DiffWidget.canvas, self.DiffTab)
# Create the navigation toolbar, tied to the canvas
self.mpl_toolbar = NavigationToolbar(self.DiffWidget.canvas, self.DiffTab)
#add widgets to toolbar
self.comboBox_rotate = QtGui.QComboBox()
self.checkBox_labels = QtGui.QCheckBox("Labels")
self.checkBox_labels.setChecked(True)
self.mpl_toolbar.addWidget(self.comboBox_rotate)
self.mpl_toolbar.addWidget(self.checkBox_labels)
#add toolbar to tabs
self.verticalLayout.addWidget(self.mpl_toolbar)
#Plot initial zero spot
self.Plot.plot(0,0, linestyle = '', marker='o', markersize = 10, color = 'black')
self.Plot.set_xlim([-5,5])
self.Plot.set_ylim([-5,5])
#self.Plot.annotate('0 0 0', xy = (0,0), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom',
#bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01))
#Initialize Metric tensor Tables
self.Gtable_size = (200,200)
self.Gtable = TableWidget()
self.G_inv_table = TableWidget()
self.tensorlayout.addWidget(self.Gtable,2,0) #third row, first column
self.tensorlayout.addWidget(self.G_inv_table,2,1)
self.Gtable.resize(self.Gtable_size[0],self.Gtable_size[1])
self.G_inv_table.resize(self.Gtable_size[0],self.Gtable_size[1])
self.Gtable.setData(np.eye(3))
self.G_inv_table.setData(np.eye(3))
for i in range(3):
self.Gtable.setColumnWidth(i,self.Gtable_size[0]/4)
self.Gtable.setRowHeight(i,self.Gtable_size[1]/3.5)
self.G_inv_table.setColumnWidth(i,self.Gtable_size[0]/3.35)
self.G_inv_table.setRowHeight(i,self.Gtable_size[1]/3.5)
self.a = 1; self.b=1; self.c=1
#Initialize parameter tables
self.param_table_size = (200,200)
self.Gparam_table = TableWidget()
self.Gparam_inv_table = TableWidget()
self.Gparam_table.resize(self.param_table_size[0],self.param_table_size[1])
self.Gparam_inv_table.resize(self.param_table_size[0],self.param_table_size[1])
initdat = np.transpose(np.array([[1,1,1,90,90,90]]))
self.Gparam_table.setData(initdat)
self.Gparam_inv_table.setData(initdat)
self.Gparam_table.setHorizontalHeaderLabels(['Parameters'])
self.Gparam_inv_table.setHorizontalHeaderLabels(['Parameters'])
self.Gparam_table.setVerticalHeaderLabels([u'a',u'b',u'c',u'\u03B1',u'\u03B2',u'\u03B3'])
self.Gparam_inv_table.setVerticalHeaderLabels([u'a*',u'b*',u'c*',u'\u03B1*',u'\u03B2*',u'\u03B3*'])
self.tensorlayout.addWidget(self.Gparam_table,3,0)
self.tensorlayout.addWidget(self.Gparam_inv_table,3,1)
for i in range(0,6):
self.Gparam_table.setColumnWidth(i,self.param_table_size[0])
self.Gparam_table.setRowHeight(i,self.param_table_size[0]/6.7)
self.Gparam_inv_table.setColumnWidth(i,self.param_table_size[0])
self.Gparam_inv_table.setRowHeight(i,self.param_table_size[0]/6.7)
#D-spacing table
self.dspace_table_size = (400,630)
self.dspace_table = TableWidget()
self.dspace_table.resize(self.dspace_table_size[0],self.dspace_table_size[1])
self.dspace_table.setData(np.array([[0,0,0,0]]))
self.dspace_table.setHorizontalHeaderLabels(['d-space','h','k','l'])
self.dspacelayout.addWidget(self.dspace_table)
self.dspace_table.setColumnWidth(0,80)
for i in range(1,4):
self.dspace_table.setColumnWidth(i,50)
# Set miller indices
self.miller_indices = [str(x) for x in range(-6,7)]
self.comboBox_hmin.addItems(self.miller_indices)
self.comboBox_kmin.addItems(self.miller_indices)
self.comboBox_lmin.addItems(self.miller_indices)
self.comboBox_hmin.setCurrentIndex(4)
self.comboBox_kmin.setCurrentIndex(4)
self.comboBox_lmin.setCurrentIndex(4)
# Miller max combo boxes get the same index list; defaults are set below (the setMillerMax_* calls are currently disabled)
self.comboBox_hmax.addItems(self.miller_indices)
self.comboBox_kmax.addItems(self.miller_indices)
self.comboBox_lmax.addItems(self.miller_indices)
#self.setMillerMax_h()
#self.setMillerMax_k()
#self.setMillerMax_l()
self.comboBox_hmax.setCurrentIndex(8)
self.comboBox_kmax.setCurrentIndex(8)
self.comboBox_lmax.setCurrentIndex(8)
#Set zone axis parameters
#by default set as [0 0 1]
zone_indices = [str(x) for x in range(-5,6)]
self.comboBox_u.addItems(zone_indices)
self.comboBox_v.addItems(zone_indices)
self.comboBox_w.addItems(zone_indices)
self.comboBox_u.setCurrentIndex(5)
self.comboBox_v.setCurrentIndex(5)
self.comboBox_w.setCurrentIndex(6)
#set calculator comboboxes
self.comboBox_h1.addItems(self.miller_indices)
self.comboBox_h2.addItems(self.miller_indices)
self.comboBox_k1.addItems(self.miller_indices)
self.comboBox_k2.addItems(self.miller_indices)
self.comboBox_l1.addItems(self.miller_indices)
self.comboBox_l2.addItems(self.miller_indices)
self.comboBox_h1.setCurrentIndex(7)
self.comboBox_h2.setCurrentIndex(8)
self.comboBox_k1.setCurrentIndex(6)
self.comboBox_k2.setCurrentIndex(6)
self.comboBox_l1.setCurrentIndex(6)
self.comboBox_l2.setCurrentIndex(6)
#Initialize mineral database combobox
self.setMineralList()
#Initialize rotation of diffraction pattern.
#Offers rotations from -180 to 180 degrees in 30 degree increments
rotate_items = ['-180','-150','-120','-90','-60','-30','0','30','60','90','120','150','180']
self.comboBox_rotate.addItems(rotate_items)
self.comboBox_rotate.setCurrentIndex(6) #zero by default
#get values from the beam energy, camera length, and camera constant spin boxes
self.spinBox_beamenergy.setValue(int(self.common.beamenergy))
self.spinBox_camlength.setValue(int(self.common.camlength))
self.doubleSpinBox_camconst.setValue(self.common.camconst)
#Initialize signals and slots
#This needs to go here after setting Miller indices
#When initializing, it runs Recalculate to do metric tensor and d-spacings
#must go before setting crystal types, but after setting all of the combo boxes
#combo boxes recalculate each time their index changes once the signals/slots set up
#if signals/slots placed before, will recalc d-spacings every time you initialize a combobox value
self.signals_slots()
# Set crystal type combo box items:
self.crystaltypes = ['Cubic','Tetragonal','Orthorhombic','Trigonal', 'Hexagonal','Monoclinic','Triclinic']
self.comboBox_crystaltype.addItems(self.crystaltypes)
#Redo some labels in unicode/greek characters
self.label_alpha.setText(u'\u03B1')
self.label_beta.setText(u'\u03B2')
self.label_gamma.setText(u'\u03B3')
self.label_dist_recip.setText(u'Reciprocal Distance (\u212B\u207B\u00B9 )')
self.label_dist_real.setText(u'Real Distance (\u212B)')
self.label_dist_film.setText(u'Film Distance (cm)')
self.label_angle.setText(u'Angle (\u00B0)')
v = self.version.split('.')
pv = v[0] + '.' + v[1] #only major/minor versions. not bugfixes
self.label_pyLATTICE.setText(u'pyLATTICE %s' % pv)
#initialize popup IPython console
#can interact with specific data
self._initIPython(self.common)
def _grabCommon(self):
"""Get all common variables."""
self.common = common()
self._overline_strings = self.common._overline_strings
self.DSpaces = self.common.DSpaces
self.ZoneAxis = self.common.ZoneAxis
self.u = self.common.u
self.v = self.common.v
self.w = self.common.w
#lattice parameters and angles
self.a = self.common.a
self.b = self.common.b
self.c = self.common.c
self.astar = self.common.astar
self.bstar = self.common.bstar
self.cstar = self.common.cstar
self.alpha = self.common.alpha
self.beta = self.common.beta
self.gamma = self.common.gamma
self.alphastar = self.common.alphastar
self.betastar = self.common.betastar
self.gammastar = self.common.gammastar
#TEM params
self.beamenergy = self.common.beamenergy
self.camlength = self.common.camlength
self.camconst = self.common.camconst
self.wavelength = self.common.wavelength
#SpaceGroup data
self.sg = self.common.sg
self.sghex = self.common.sghex
self.mineraldb = self.common.mineraldb
self.manualConds = self.common.manualConds #manual space group conditions
def updateCommon(self):
"""Update all of the common variables and push these to the IPython console"""
self.common.DSpaces = self.DSpaces
self.common.ZoneAxis = self.ZoneAxis
self.common.u = self.u
self.common.v = self.v
self.common.w = self.w
self.common.a = self.a
self.common.b = self.b
self.common.c = self.c
self.common.astar = self.astar
self.common.bstar = self.bstar
self.common.cstar = self.cstar
self.common.alpha = self.alpha
self.common.beta = self.beta
self.common.gamma = self.gamma
self.common.alphastar = self.alphastar
self.common.betastar = self.betastar
self.common.gammastar = self.gammastar
#mineral database and manual conditions
#self.mineraldb = self.common.mineraldb
self.common.manualConds = self.manualConds #manual space group conditions
self.common.beamenergy = self.beamenergy = self.spinBox_beamenergy.value()
self.common.camlength = self.camlength = self.spinBox_camlength.value()
self.common.camconst = self.camconst = self.doubleSpinBox_camconst.value()
self.common.wavelength = self.wavelength = self.common.Wavelength(self.beamenergy)
self.updateIPY(self.common)
@QtCore.pyqtSlot(str,str,str,str)
def on_distances_sent(self,recip_d, real_d, film_d, angle):
self.lineEdit_recip_2.setText(recip_d)
self.lineEdit_real_2.setText(real_d)
self.lineEdit_film_2.setText(film_d)
self.lineEdit_angle_3.setText(angle)
def Recalculate(self):
"""Run MetricTensor() and D_Spacigns(). For use when slider hasn't changed"""
self.MetricTensor()
self.D_Spacings()
def ReplotDiffraction(self):
self.Recalculate()
try:
self.PlotDiffraction()
except UnboundLocalError:
pass
# def Print(self):
# """test print fn"""
# print(self.comboBox_spacegroup.currentIndex())
def signals_slots(self):
"""All of the signals and slots not in .ui file"""
#Testing
#QtCore.QObject.connect(self.command_Wulff, QtCore.SIGNAL(_fromUtf8("clicked()")),WULFF)
### Menu actions
QtCore.QObject.connect(self.actionClose, QtCore.SIGNAL(_fromUtf8("triggered()")), self.close)
QtCore.QObject.connect(self.actionAbout, QtCore.SIGNAL(_fromUtf8("triggered()")), self.About)
QtCore.QObject.connect(self.actionHow_to, QtCore.SIGNAL(_fromUtf8("triggered()")), self.HowTo)
QtCore.QObject.connect(self.actionSave_D_spacings, QtCore.SIGNAL(_fromUtf8("triggered()")), self.SaveDSpace)
QtCore.QObject.connect(self.actionRemove_DB_Minerals, QtCore.SIGNAL(_fromUtf8("triggered()")), self.removeMinerals)
QtCore.QObject.connect(self.actionSave_Mineral_Database, QtCore.SIGNAL(_fromUtf8("triggered()")), self.SaveMineralDB)
QtCore.QObject.connect(self.actionLoad_Mineral_Database, QtCore.SIGNAL(_fromUtf8("triggered()")), self.LoadMineralDB)
QtCore.QObject.connect(self.actionAppendMineral, QtCore.SIGNAL(_fromUtf8("triggered()")), self.AppendMineral)
QtCore.QObject.connect(self.actionIPython_Console, QtCore.SIGNAL(_fromUtf8("triggered()")), self.IPY)
QtCore.QObject.connect(self.actionManualCond, QtCore.SIGNAL(_fromUtf8("triggered()")), self.ManualConditions)
QtCore.QObject.connect(self.actionSettings, QtCore.SIGNAL(_fromUtf8("triggered()")), self.setSettings)
### Command buttons
QtCore.QObject.connect(self.command_Plot, QtCore.SIGNAL(_fromUtf8("clicked()")),self.PlotDiffraction)
QtCore.QObject.connect(self.command_recalculate, QtCore.SIGNAL(_fromUtf8("clicked()")),self.Recalculate)
#QtCore.QObject.connect(self.command_Wulff, QtCore.SIGNAL(_fromUtf8("clicked()")),self.updateIPY)
### crystal and cell type actions
QtCore.QObject.connect(self.comboBox_crystaltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setCellType)
QtCore.QObject.connect(self.comboBox_celltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setConditions)
QtCore.QObject.connect(self.spinBox_spacegroup, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.SpaceGroupLookup)
QtCore.QObject.connect(self.checkBox_obverse, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.D_Spacings)
QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
#QtCore.QObject.connect(self.comboBox_spacegroup, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.D_Spacings)
### Navigation Toolbar buttons
QtCore.QObject.connect(self.comboBox_rotate, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.PlotDiffraction)
QtCore.QObject.connect(self.checkBox_labels, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.PlotDiffraction) #labels checkbox
### Checkboxes and Miller indices
QtCore.QObject.connect(self.checkBox_samemin, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.sameMin)
QtCore.QObject.connect(self.checkBox_samemax, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.sameMax)
QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.checkBox_showforbidden, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.PlotDiffraction)
### Sliders/spin boxes: lattice parameters
QtCore.QObject.connect(self.hSlider_a, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.slider_to_spindouble)
QtCore.QObject.connect(self.hSlider_b, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.slider_to_spindouble)
QtCore.QObject.connect(self.hSlider_c, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.slider_to_spindouble)
QtCore.QObject.connect(self.hSlider_a, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
QtCore.QObject.connect(self.hSlider_b, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
QtCore.QObject.connect(self.hSlider_c, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
QtCore.QObject.connect(self.doubleSpinBox_a, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.spindouble_to_slider)
QtCore.QObject.connect(self.doubleSpinBox_b, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.spindouble_to_slider)
QtCore.QObject.connect(self.doubleSpinBox_c, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.spindouble_to_slider)
QtCore.QObject.connect(self.doubleSpinBox_a, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.MetricTensor)
QtCore.QObject.connect(self.doubleSpinBox_b, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.MetricTensor)
QtCore.QObject.connect(self.doubleSpinBox_c, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.MetricTensor)
#QtCore.QObject.connect(self.doubleSpinBox_a, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.D_Spacings)
#QtCore.QObject.connect(self.doubleSpinBox_b, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.D_Spacings)
#QtCore.QObject.connect(self.doubleSpinBox_c, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.D_Spacings)
QtCore.QObject.connect(self.hSlider_alpha, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.MetricTensor)
QtCore.QObject.connect(self.hSlider_beta, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.MetricTensor)
QtCore.QObject.connect(self.hSlider_gamma, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.MetricTensor)
QtCore.QObject.connect(self.hSlider_alpha, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
QtCore.QObject.connect(self.hSlider_beta, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
QtCore.QObject.connect(self.hSlider_gamma, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings)
#Spinboxes beam energy, cam length, camconst
QtCore.QObject.connect(self.spinBox_beamenergy,QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),self.updateCommon)
QtCore.QObject.connect(self.spinBox_camlength,QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),self.updateCommon)
QtCore.QObject.connect(self.doubleSpinBox_camconst,QtCore.SIGNAL(_fromUtf8("valueChanged(double)")),self.updateCommon)
#Instances to recalculate metric tensor and d-spacings
#only enable these once you get miller maxes sorted out so they don't change
QtCore.QObject.connect(self.checkBox_zoneaxis, QtCore.SIGNAL(_fromUtf8("toggled(bool)")),self.DiffWidget, QtCore.SLOT(_fromUtf8("setEnabled(bool)")))
QtCore.QObject.connect(self.comboBox_u, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction)
QtCore.QObject.connect(self.comboBox_v, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction)
QtCore.QObject.connect(self.comboBox_w, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction)
QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.D_Spacings)
QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.D_Spacings)
#QtCore.QObject.connect(self.checkBox_labels, QtCore.SIGNAL(_fromUtf8("toggled(bool)")),self.UpdatePlot)
#QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.TempMax)
#QtCore.QObject.connect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.TempMax)
#QtCore.QObject.connect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.TempMax)
#QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.Recalculate)
#QtCore.QObject.connect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.Recalculate)
#QtCore.QObject.connect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.Recalculate)
#QtCore.QObject.connect(self.comboBox_w, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction)
#Calculator Tab
QtCore.QObject.connect(self.checkBox_normals, QtCore.SIGNAL(_fromUtf8("toggled(bool)")),self.CalcLabels)
QtCore.QObject.connect(self.command_angle, QtCore.SIGNAL(_fromUtf8("clicked()")),self.Calculator)
def _initIPython(self,common):
"""Initialize IPython console from which the user can interact with data/files"""
banner = """Welcome to the pyLATTICE IPython Qt4 Console.
You are here to interact with data and parameters - Python command line knowledge required.
Use the 'whos' command for a list of available variables. Sometimes this does not work the first time.
Imported packages include: pylab (including numpy modules) as 'pl'; pandas as 'pd'
\n"""
self.ipywidget = IPythonConsole(common,banner=banner)
def IPY(self):
self.ipywidget.SHOW()
def updateIPY(self,common):
self.ipyvars = common.__dict__
self.ipywidget.pushVariables(self.ipyvars)
def slider_to_spindouble(self,slider):
"""Sliders only send/receive int data. Converts int to double by dividing by 100."""
if self.hSlider_a.isSliderDown():
self.a = self.hSlider_a.value() / 100
self.doubleSpinBox_a.setValue(self.a)
elif self.hSlider_b.isSliderDown():
self.b = self.hSlider_b.value() / 100
self.doubleSpinBox_b.setValue(self.b)
elif self.hSlider_c.isSliderDown():
self.c = self.hSlider_c.value() / 100
self.doubleSpinBox_c.setValue(self.c)
def spindouble_to_slider(self,spinbox):
"""Converts spindouble entry into int for slider (multiply by 100)"""
#There may be some redundancy in the connections setting values.
#hopefully this does not slow the program down.
#without these, some aspect often lags and gives the wrong value
if self.comboBox_crystaltype.currentText() == 'Cubic':
self.a = self.doubleSpinBox_a.value()
self.hSlider_a.setValue(self.a * 100)
self.hSlider_b.setValue(self.a * 100);self.doubleSpinBox_b.setValue(self.a)
self.hSlider_c.setValue(self.a * 100);self.doubleSpinBox_c.setValue(self.a)
elif self.comboBox_crystaltype.currentText() in ('Tetragonal','Trigonal','Hexagonal'):
#these three systems share a = b != c, so b tracks a while c stays independent
self.a = self.doubleSpinBox_a.value()
self.hSlider_a.setValue(self.a * 100)
self.hSlider_b.setValue(self.a * 100); self.doubleSpinBox_b.setValue(self.a)
else:
self.a = self.doubleSpinBox_a.value()
self.hSlider_a.setValue(self.a * 100)
self.b = self.doubleSpinBox_b.value()
self.hSlider_b.setValue(self.b * 100)
self.c = self.doubleSpinBox_c.value()
self.hSlider_c.setValue(self.c * 100)
def setMillerMax_h(self):
"""Sets the items available for the max miller indices to include everything greater than the selected min index"""
self.miller_max_h = [str(x) for x in range(int(self.comboBox_hmin.currentText()) + 1,7)]
self.comboBox_hmax.clear()
self.comboBox_hmax.addItems(self.miller_max_h)
def setMillerMax_k(self):
"""Sets the items available for the max miller indices to include everything greater than the selected min index"""
self.miller_max_k = [str(x) for x in range(int(self.comboBox_kmin.currentText()) + 1,7)]
self.comboBox_kmax.clear()
self.comboBox_kmax.addItems(self.miller_max_k)
def setMillerMax_l(self):
"""Sets the items available for the max miller indices to include everything greater than the selected min index"""
self.miller_max_l = [str(x) for x in range(int(self.comboBox_lmin.currentText()) + 1,7)]
self.comboBox_lmax.clear()
self.comboBox_lmax.addItems(self.miller_max_l)
def sameMin(self):
if not self.checkBox_samemin.isChecked():
#change to value_changed, not index changed. lengths may be different if checkboxes aren't clicked
QtCore.QObject.disconnect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.disconnect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.disconnect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
elif self.checkBox_samemin.isChecked():
QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
def sameMax(self):
if not self.checkBox_samemax.isChecked():
QtCore.QObject.disconnect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.disconnect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.disconnect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
elif self.checkBox_samemax.isChecked():
QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
QtCore.QObject.connect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)")))
def setMineral(self):
i = self.comboBox_mineraldb.currentIndex()
if i == 0:
pass
else:
#disconnect d-space calculations till the end
QtCore.QObject.disconnect(self.comboBox_crystaltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setCellType)
QtCore.QObject.disconnect(self.comboBox_celltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setConditions)
QtCore.QObject.disconnect(self.spinBox_spacegroup, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.SpaceGroupLookup)
m = self.mineraldb.loc[i]
ind = ['Cubic','Tetragonal','Orthorhombic','Trigonal','Hexagonal','Monoclinic','Triclinic'].index(m.Crystal)
self.comboBox_crystaltype.setCurrentIndex(ind)
self.setCellType()
ind = self.celltypes.index(m.UnitCell)
self.comboBox_celltype.setCurrentIndex(ind)
self.setConditions()
ind = self.sgnumbers.index(m.SpaceGroup)
self.comboBox_spacegroup.setCurrentIndex(ind)
#now the a,b,c parameters
#print(self.sgnumbers)
self.doubleSpinBox_a.setValue(m.a)
self.a = m.a
if not np.isnan(m.b):
self.doubleSpinBox_b.setValue(m.b)
self.b = m.b
if not np.isnan(m.c):
self.doubleSpinBox_c.setValue(m.c)
self.c = m.c
try:
self.manualConds = m.SpecialConditions.split(';')
except AttributeError: #ignore floats or nans
self.manualConds = ''
self.MetricTensor()
#reconnect and calculate
QtCore.QObject.connect(self.comboBox_crystaltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setCellType)
QtCore.QObject.connect(self.comboBox_celltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setConditions)
QtCore.QObject.connect(self.spinBox_spacegroup, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.SpaceGroupLookup)
self.D_Spacings()
def setCellType(self):
"""Sets the unit cell possibilities based upon the crystal type selected"""
self.comboBox_celltype.clear()
self.comboBox_spacegroup.clear()
self.celltypes = []
if self.comboBox_crystaltype.currentText() == 'Cubic':
self.celltypes = ['Primitive','Face Centered','Body Centered']
self.length_label = u' a = b = c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(True)
self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(True)
self.angles_label = u' \u03B1 = \u03B2 = \u03B3 = 90°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 90
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Tetragonal':
self.celltypes = ['Primitive','Body Centered']
self.length_label = u' a = b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u' \u03B1 = \u03B2 = \u03B3 = 90°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 90
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Orthorhombic':
self.celltypes = ['Primitive','Face Centered','Body Centered','(001) Base Centered','(100) Base Centered']
self.length_label = u' a ≠ b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(False); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(False); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u' \u03B1 = \u03B2 = \u03B3 = 90°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 90
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Trigonal':
self.celltypes = ['Primitive','Rhombohedral','Rhombohedral, Hexagonal Axes','Hexagonal']
self.length_label = u' a = b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u' \u03B1 = \u03B2 = 90°, \u03B3 = 120°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 120
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Hexagonal':
self.celltypes = ['Primitive']
self.length_label = u' a = b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u' \u03B1 = \u03B2 = 90°, \u03B3 = 120°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 120
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Monoclinic':
self.celltypes = ['Primitive','(001) Base Centered']
self.length_label = u' a ≠ b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(False); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(False); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u' \u03B1 = \u03B3 = 90°'
self.label_angle_equality.setText(self.angles_label)
self.alpha = 90; self.beta = 90; self.gamma = 90
self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma)
#disable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True)
self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True)
elif self.comboBox_crystaltype.currentText() == 'Triclinic':
self.celltypes = ['Primitive']
self.length_label = u' a ≠ b ≠ c'
self.label_lattice_equality.setText(self.length_label)
self.hSlider_b.setDisabled(False); self.hSlider_c.setDisabled(False)
self.doubleSpinBox_b.setDisabled(False); self.doubleSpinBox_c.setDisabled(False)
self.angles_label = u''
self.label_angle_equality.setText(self.angles_label)
#Enable editing sliders and spinboxes
self.hSlider_alpha.setDisabled(False); self.hSlider_beta.setDisabled(False); self.hSlider_gamma.setDisabled(False)
self.spinBox_alpha.setDisabled(False); self.spinBox_beta.setDisabled(False); self.spinBox_gamma.setDisabled(False)
self.comboBox_celltype.addItems(self.celltypes)
#self.Recalculate()
def setConditions(self):
"""Sets conditions based upon which unit cell type is chosen.
Store equations in strings and then evaluate and solve with eval()"""
geom = self.comboBox_crystaltype.currentText()
unit = self.comboBox_celltype.currentText()
if unit in ['Rhombohedral','Rhombohedral, Hexagonal Axes']:
self.checkBox_obverse.setDisabled(False)
else:
self.checkBox_obverse.setDisabled(True)
try: #there is a loop I can't find where this tries to calculate conditions before the unit cell type is set, resulting in an IndexError.
#this simply suppresses the error, as another pass is always fine.
if unit in ['Rhombohedral, Hexagonal Axes','Hexagonal']:
rhomhex=True
self.conditions = np.unique(self.sghex[self.sghex['Unit Cell'] == unit]['Conditions'])[0]
else:
rhomhex=False
self.conditions = np.unique(self.sg[self.sg['Unit Cell'] == unit]['Conditions'])[0] #grab individual condition b/c of repetition
self.setSpaceGroups(geom,unit,rhomhex)
#QtCore.QObject.disconnect(self.comboBox_spacegroup, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.D_Spacings)
self.comboBox_spacegroup.clear()
self.comboBox_spacegroup.addItems(self.sgnumlist)
#QtCore.QObject.connect(self.comboBox_spacegroup, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.D_Spacings)
self.Recalculate()
except IndexError:
pass
def setSpaceGroups(self,geom,unit,rhomhex=False):
"""Sets the space group options based upon crystal geometry and unit cell type"""
if rhomhex:
sg = self.sghex
elif not rhomhex:
sg = self.sg
self.sgnumbers = list(sg[(sg['Geometry'] == geom) & (sg['Unit Cell'] == unit)].index)
self.sglist = list(sg.loc[self.sgnumbers,'Patterson'])
self.sgnumlist = [str(x) + ': ' + y for x,y in zip(self.sgnumbers,self.sglist)]
def SpaceGroupConditions(self,number):
"""Looks up the space-group-specific allowed diffraction spots.
number is the specific space group number to look up."""
if not self.checkBox_spacegroup.isChecked():
sg_conditions = 'True'
elif self.checkBox_spacegroup.isChecked():
#make sure number is an integer
#something is wrong with some FCC crystals
number = int(number)
unit = self.comboBox_celltype.currentText()
if unit in ['Rhombohedral, Hexagonal Axes','Hexagonal']:
sg = self.sghex
else:
sg = self.sg
sg_conditions = sg.loc[number,'SG Conditions']
return sg_conditions
def SpaceGroupLookup(self):
"""Takes input from slider/spinbox and outputs sapcegroup info into text line"""
index = self.spinBox_spacegroup.value()
c = self.sg.loc[index,'Geometry']
#u = self.sg.loc[index,'Unit Cell']
sg = self.sg.loc[index,'Patterson']
text = ': '.join([c,sg])
self.label_spacegrouplookup.setText(text)
def MetricTensor(self):
"""Calculate and show G, the metric tensor, and G*, the inverse metric tensor.
Also call function that outputs parameters into tables."""
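#Note (added for clarity): the direct-space metric tensor is built from the lattice
#parameters in the standard way,
#    G = [[a*a,            a*b*cos(gamma), a*c*cos(beta) ],
#         [a*b*cos(gamma), b*b,            b*c*cos(alpha)],
#         [a*c*cos(beta),  b*c*cos(alpha), c*c           ]]
#and the reciprocal metric tensor G* is simply its matrix inverse (computed below).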
self.G = np.zeros([3,3])
#self.G_inv
#remember, indices start at 0
#the metric tensor is symmetric: G[i,j] = G[j,i]
self.a = self.doubleSpinBox_a.value()
self.b = self.doubleSpinBox_b.value()
self.c = self.doubleSpinBox_c.value()
self.G[0,0] = self.a**2
self.G[0,1] = round(self.a * self.b * np.cos(np.radians(self.spinBox_gamma.value())),6)
self.G[1,0] = self.G[0,1]
self.G[1,1] = self.b**2
self.G[0,2] = round(self.a * self.c * np.cos(np.radians(self.spinBox_beta.value())),6)
self.G[2,0] = self.G[0,2]
self.G[2,2] = self.doubleSpinBox_c.value()**2
self.G[1,2] = round(self.c * self.b * np.cos(np.radians(self.spinBox_alpha.value())),6)
self.G[2,1] = self.G[1,2]
# calc G inverse, G*
self.G_inv = np.linalg.inv(self.G)
self.Gtable.setData(self.G)
#self.Gtable.resizeColumnsToContents()
self.G_inv_table.setData(self.G_inv)
#self.G_inv_table.resizeColumnsToContents()
for i in range(0,3):
self.Gtable.setColumnWidth(i,self.Gtable_size[0]/3.35)
self.Gtable.setRowHeight(i,self.Gtable_size[1]/3.5)
self.G_inv_table.setColumnWidth(i,self.Gtable_size[0]/3.35)
self.G_inv_table.setRowHeight(i,self.Gtable_size[1]/3.5)
self.Parameters()
def Parameters(self):
"""Grabs current parameters and outputs them in tables.
Calculates reciprocal lattice parameters as well.
Must make it deal with complex numbers, but really only necessary for Triclinic..."""
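#For reference (standard relations, matching the expressions below):
#    a* = sqrt(G*[0,0]),  b* = sqrt(G*[1,1]),  c* = sqrt(G*[2,2])
#    cos(gamma*) = G*[0,1]/(a* b*),  cos(beta*) = G*[0,2]/(a* c*),  cos(alpha*) = G*[1,2]/(b* c*)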
self.parameters_direct = np.transpose(np.array([[self.doubleSpinBox_a.value(),self.doubleSpinBox_b.value(),self.doubleSpinBox_c.value(),self.spinBox_alpha.value(),self.spinBox_beta.value(),self.spinBox_gamma.value()]]))
self.astar = np.sqrt(self.G_inv[0,0]); self.bstar = np.sqrt(self.G_inv[1,1]); self.cstar = np.sqrt(self.G_inv[2,2])
self.gammastar = np.arccos(self.G_inv[0,1] / (self.astar * self.bstar))*180 / np.pi
self.betastar = np.arccos(self.G_inv[0,2] / (self.astar * self.cstar))*180 / np.pi
self.alphastar = np.arccos(self.G_inv[1,2] / (self.cstar * self.bstar))*180 / np.pi
self.parameters_reciprocal = np.transpose(np.array([[self.astar,self.bstar,self.cstar,self.alphastar,self.betastar,self.gammastar]]))
self.Gparam_table.setData(self.parameters_direct)
self.Gparam_inv_table.setData(self.parameters_reciprocal)
self.Gparam_table.setHorizontalHeaderLabels(['Parameters'])
self.Gparam_inv_table.setHorizontalHeaderLabels(['Parameters'])
self.Gparam_table.setVerticalHeaderLabels([u'a',u'b',u'c',u'\u03B1',u'\u03B2',u'\u03B3'])
self.Gparam_inv_table.setVerticalHeaderLabels([u'a*',u'b*',u'c*',u'\u03B1*',u'\u03B2*',u'\u03B3*'])
for i in range(0,6):
self.Gparam_table.setColumnWidth(i,self.param_table_size[0])
self.Gparam_table.setRowHeight(i,self.param_table_size[0]/6.7)
self.Gparam_inv_table.setColumnWidth(i,self.param_table_size[0])
self.Gparam_inv_table.setRowHeight(i,self.param_table_size[0]/6.7)
def D_Spacings(self):
"""Calculates D-spacings using the metric tensor and places them in a table (sorted?)"""
#grab spacegroup conditions
#multiple different spacegroup conditions, e.g. eval('h==1 or k==1') returns True if one is satisfied
#add all conditions together into one string
#full_conditions = self.conditions + ' and ' + sg_conditions
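#For reference: the interplanar spacing of a reflection (h k l) follows from the
#reciprocal metric tensor via 1/d**2 = [h k l] . G* . [h k l]^T; the imported DSpace()
#helper is assumed to evaluate this for every (h,k,l) in the selected range that
#passes the general reflection conditions.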
if self.checkBox_zoneaxis.isChecked():
try:
self.u = int(self.comboBox_u.currentText())
except ValueError:
self.u = 0
try:
self.v = int(self.comboBox_v.currentText())
except ValueError:
self.v = 0
try:
self.w = int(self.comboBox_w.currentText())
except ValueError:
self.w = 0
#set "q" for rhombohedral obserse/reverse
if self.checkBox_obverse.isChecked():
q = 1
elif not self.checkBox_obverse.isChecked():
q = -1
else:
q = 0
#make pandas dataframe with multiindex h,k,l
#reinitialize dataframe
self.DSpaces = pd.DataFrame(columns = ['d-space','h','k','l'])
self.Forbidden = pd.DataFrame(columns = ['d-space','h','k','l'])
#maybe implement masking instead of loops
hmin = int(self.comboBox_hmin.currentText())
hmax = int(self.comboBox_hmax.currentText())
kmin = int(self.comboBox_kmin.currentText())
kmax = int(self.comboBox_kmax.currentText())
lmin = int(self.comboBox_lmin.currentText())
lmax = int(self.comboBox_lmax.currentText())
gen_conditions = str(self.conditions)
#needs to deal with possibility of conditional special statements, will update dspace.pyx
#first calculate all general conditions
self.DSpaces = DSpace(self.G_inv,self.u,self.v,self.w,hmin,hmax,kmin,kmax,lmin,lmax,gen_conditions,q)
#now deal with special spacegroup conditions by removing invalid spots
sg_conditions = self.SpaceGroupConditions(self.sgnumbers[self.comboBox_spacegroup.currentIndex()])
if self.manualConds != []:
if sg_conditions == 'True':
sg_conditions = ''
for c in self.manualConds:
sg_conditions += ';%s' % c
sg_conditions = sg_conditions.lstrip(';')
self.DSpaces = self.RemoveForbidden(self.DSpaces,sg_conditions)
#sort in descending Dspace order, then by h values, then k, then l...
self.DSpaces.sort(columns=['d-space','h','k','l'],ascending=False,inplace=True)
#reset indices for convenience later
self.DSpaces.index = [x for x in range(len(self.DSpaces))]
self.common.DSpaces = self.DSpaces #update DSpaces
self.dspace_table.setData(self.DSpaces)
self.dspace_table.setColumnWidth(0,80) #make d-space column a bit wider
for i in range(1,4):
self.dspace_table.setColumnWidth(i,45)
elif not self.checkBox_zoneaxis.isChecked():
pass
try:
self.updateCommon()
except AttributeError: #first go round ipython console hasn't been initialized yet
pass
def RemoveForbidden(self,d,sgconditions):
#h = d['h']; k = d['k']; l = d['l']
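#Note on the condition-string format (hypothetical example for illustration):
#    "h==0; if h==k : l%2==0"
#Conditions are ';'-separated: a bare condition marks every matching row as forbidden,
#while an "if A : B" condition forbids rows that satisfy A but violate B.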
f = pd.DataFrame(columns = ['d-space','h','k','l'])
try:
if eval(sgconditions):
return(d)
except (KeyError,SyntaxError): #if sgconditions not 'True'
#d[(h==k) & ~(l%2==0)]
#d = d.drop(r.index)
#split multiple conditions up
conds = sgconditions.split(';')
for c in conds: #these should be if:then statements, so remove the if:~thens
c = c.strip()
if not c.startswith('if'):
r = d[eval(c)]
d = d.drop(r.index)
else:
c = c.lstrip('if').strip()
iff, then = c.split(':') #eval doesn't care about spaces
#needed for eval
h = d.h; k = d.k; l = d.l
r = d[eval('(' + iff + ')& ~(' + then + ')')]
d = d.drop(r.index)
f = pd.concat([f,r])
f.sort(columns=['d-space','h','k','l'],ascending=False,inplace=True)
f.index = [x for x in range(len(f))]
self.common.Forbidden = f
self.Forbidden = self.common.Forbidden
return(d)
def setMineralList(self):
self.comboBox_mineraldb.clear()
self.minlist = list(self.mineraldb['Chemical'] + ': ' + self.mineraldb['Name'])
self.comboBox_mineraldb.addItems(self.minlist)
def removeMinerals(self):
QtCore.QObject.disconnect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
mindiag = MineralListDialog()
model = QtGui.QStandardItemModel(mindiag.listView)
#mindiag.buttonBox.accepted.connect(mindiag.accept)
#mindiag.buttonBox.rejected.connect(mindiag.reject)
for mineral in self.minlist[1:]:
item = QtGui.QStandardItem(mineral)
item.setCheckable(True)
item.setEditable(False)
model.appendRow(item)
mindiag.listView.setModel(model)
if mindiag.exec_():
i=1
l=[]
while model.item(i):
if model.item(i).checkState():
l.append(i)
i += 1
self.mineraldb = self.mineraldb.drop(self.mineraldb.index[l])
self.mineraldb.index = list(range(len(self.mineraldb)))
self.setMineralList()
QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
def AppendMineral(self):
QtCore.QObject.disconnect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
dial = NewMineralDialog()
if dial.exec_():
name = dial.lineEdit_name.text()
sym = dial.lineEdit_sym.text()
if name == '':
name = 'Mineral'
if sym == '':
sym = 'XX'
#set special conditions to a bunch of strings or as a NaN
if self.manualConds == []:
SCs = np.nan
else:
SCs = ';'.join(self.manualConds)
params = {'Name':name, 'Chemical':sym,
'Crystal':self.comboBox_crystaltype.currentText(),
'UnitCell':self.comboBox_celltype.currentText(),
'SpaceGroup':int(self.comboBox_spacegroup.currentText().split(':')[0]),
'a':self.doubleSpinBox_a.value(),
'b':self.doubleSpinBox_b.value(),
'c':self.doubleSpinBox_c.value(),
'SpecialConditions':SCs}
self.mineraldb = self.mineraldb.append(params,ignore_index=True)
self.setMineralList()
QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
def setSettings(self):
"""Raise SettingsDialog and pass values to pyLATTICE parameters.
Grab current settings first."""
current = {'a max':self.doubleSpinBox_a.maximum(),
'b max':self.doubleSpinBox_b.maximum(),
'c max':self.doubleSpinBox_c.maximum()}
dial = SettingsDialog(current)
if dial.exec_():
amax = dial.maxa.value()
bmax = dial.maxb.value()
cmax = dial.maxc.value()
#set slider and spinbox maxima
self.doubleSpinBox_a.setMaximum(amax)
self.doubleSpinBox_b.setMaximum(bmax)
self.doubleSpinBox_c.setMaximum(cmax)
self.hSlider_a.setMaximum(int(10*amax))
self.hSlider_b.setMaximum(int(10*bmax))
self.hSlider_c.setMaximum(int(10*cmax))
def ManualConditions(self):
"""Raise the manual space group conditions dialog"""
dial = ManualConditionsDialog(conditions= self.manualConds)
if dial.exec_():
num = dial.manualCondList.count()
self.manualConds = [dial.manualCondList.item(i).text() for i in range(num)]
def SaveDSpace(self):
self.Save(self.DSpaces)
def SaveMineralDB(self):
self.Save(self.mineraldb)
def LoadMineralDB(self):
QtCore.QObject.disconnect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
ftypes = 'HDF (*.h5);;CSV (*.csv);;Excel (*.xlsx)'
fname,ffilter = QtGui.QFileDialog.getOpenFileNameAndFilter(self,caption='Load Mineral Database',directory=self.common.path,filter=ftypes)
fname = str(fname)
ffilter=str(ffilter)
#print(fname,ffilter)
name, ext = os.path.splitext(fname)
self.common.path = os.path.dirname(fname)
if ffilter.startswith('HDF'):
item = pd.read_hdf(name + '.h5','table')
elif ffilter.startswith('CSV'):
item = pd.read_csv(name + '.csv',sep=',')
elif ffilter.startswith('Excel'): #allow for different excel formats
sheetname,ok = QtGui.QInputDialog.getText(self,'Input Sheetname','Sheetname')
if ok and sheetname != '':
if ext == '.xlsx' or ext == '':
item = pd.read_excel(name + '.xlsx',str(sheetname))
elif ext == '.xls':
item = pd.read_excel(name + '.xls',str(sheetname))
self.mineraldb = item
else:
QtGui.QMessageBox.information(self, "Warning!", 'You must specify a sheet name!')
self.setMineralList()
QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral)
def Save(self,item):
#item should be pandas dataframe object
ftypes = 'HDF (*.h5);;CSV (*.csv);;Excel (*.xlsx)'
fname,ffilter = QtGui.QFileDialog.getSaveFileNameAndFilter(self,caption='Save D-Spacings',directory=self.common.path,filter=ftypes)
fname = str(fname)
ffilter=str(ffilter)
#print(fname,ffilter)
name, ext = os.path.splitext(fname)
self.common.path = os.path.dirname(fname)
print(name + ext)
if ffilter.startswith('HDF'):
item.to_hdf(name + '.h5','table')
elif ffilter.startswith('CSV'):
item.to_csv(name + '.csv',sep=',')
elif ffilter.startswith('Excel'): #allow for different excel formats
if ext == '.xlsx' or ext == '':
item.to_excel(name + '.xlsx')
elif ext == '.xls':
item.to_excel(name + '.xls')
################################################################################
############################### Plotting #######################################
################################################################################
def PlotDiffraction(self):
"""Plots the current list of spots and d-spacings.
For each point in self.DSpaces [d-space,h,k,l], determines angles for plotting."""
#initialize plot with center spot only
self.Plot.clear()
self.common._x2 = False
self.Plot.set_xlabel(u'Distance (\u212B\u207B\u00B9)')#angstrom^-1
self.Plot.set_ylabel(u'Distance (\u212B\u207B\u00B9)')
#get values from the beam energy, camera length, and camera constant spin boxes
self.energy = self.spinBox_beamenergy.value()
self.camlength = self.spinBox_camlength.value()
self.camconst = self.doubleSpinBox_camconst.value()
#self.Plot.plot(0,0, linestyle = '', marker='o', markersize = 10, color = 'black',picker=5, label = u'0 0 0')
#add some labels
if self.checkBox_labels.isChecked() == True:
#center spot
#self.Plot.annotate(u'0 0 0', xy = (0,0), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom',
# bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01))
#add crystal structure information in an annotation
#grab current values
self.a = self.doubleSpinBox_a.value(); self.b = self.doubleSpinBox_b.value(); self.c = self.doubleSpinBox_c.value()
alph = self.spinBox_alpha.value(); beta = self.spinBox_beta.value(); gam = self.spinBox_gamma.value()
plot_label = r'''%s: %s; a = %.2f, b = %.2f, c = %.2f; $\alpha$ = %d$^o$, $\beta$ = %d$^o$, $\gamma$ = %d$^o$''' % (self.comboBox_crystaltype.currentText(),self.comboBox_celltype.currentText(),self.a,self.b,self.c,alph,beta,gam)
ann = self.Plot.annotate(plot_label, xy=(0.02, 1.02), xycoords='axes fraction',bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01))
ann.draggable() #make annotation draggable
#need to choose a reference point with smallest sum of absolute miller indices
#since self.DSpaces is sorted with largest d-space first, this will be the smallest sum of abs miller indices
#first point
rotation = np.radians(float(self.comboBox_rotate.currentText()))
d = np.array(self.DSpaces['d-space'],dtype=np.float)
ref = np.array(self.DSpaces.loc[0,['h','k','l']],dtype=np.int)
Q2 = np.array(self.DSpaces[['h','k','l']],dtype=np.int)
recip_vec = np.array([self.astar,self.bstar,self.cstar],dtype=np.float)
dir_vec = np.array([self.u,self.v,self.w],dtype=np.int)
#add extra factor if hcp unit cell
t = self.comboBox_crystaltype.currentText()
#must check that the Forbidden dataframe isn't empty for a specific zone axis
showf = self.checkBox_showforbidden.isChecked() and not self.Forbidden.empty
if t in ['Hexagonal','Trigonal']:
#print('Hexagonal')
#change dtypes
ref = np.array(ref,dtype=np.float)
Q2 = np.array(Q2,dtype=np.float)
lam = np.sqrt(2/3)*(self.c/self.a)
ref[2] = ref[2]/lam
ref = np.hstack([ref,-(ref[0]+ref[1])]) #add i direction, but at the end b/c it doesn't matter
Q2[:,2] = Q2[:,2]/lam
Q2 = np.append(Q2,np.array([-Q2[:,0]-Q2[:,1]]).T,axis=1)
theta,x,y = CalcSpotsHCP(d,Q2,ref,recip_vec,dir_vec,rotation)
if showf:
df = np.array(self.Forbidden['d-space'],dtype=np.float)
Q2f = np.array(self.Forbidden[['h','k','l']],dtype=np.int)
Q2f = np.array(Q2f,dtype=np.float)
Q2f[:,2] = Q2f[:,2]/lam
Q2f = np.append(Q2f,np.array([-Q2f[:,0]-Q2f[:,1]]).T,axis=1)
thetaf,xf,yf = CalcSpotsHCP(df,Q2f,ref,recip_vec,dir_vec,rotation)
else:
theta,x,y = CalcSpots(d,Q2,ref,recip_vec,self.G_inv,dir_vec,rotation)
if showf:
df = np.array(self.Forbidden['d-space'],dtype=np.float)
Q2f = np.array(self.Forbidden[['h','k','l']],dtype=np.int)
thetaf,xf,yf = CalcSpots(df,Q2f,ref,recip_vec,self.G_inv,dir_vec,rotation)
self.DSpaces['theta'] = np.degrees(theta).round(2); self.DSpaces['x'] = x; self.DSpaces['y'] = y
if showf:
self.Forbidden['theta'] = np.degrees(thetaf).round(2); self.Forbidden['x'] = xf; self.Forbidden['y'] = yf
for i in range(len(self.Forbidden)):
label = ' '.join([str(int(x)) for x in self.Forbidden.loc[i,['h','k','l']]]) #this is a bit dense, but makes a list of str() hkl values, then concatenates
#convert negative numbers to overline numbers for visual effect
for j,num in enumerate(self._overline_strings):
match = re.search(u'-%d' % (j+1),label)
if match:
label = re.sub(match.group(),num,label)
#add each label and coordinate to DSpace dataframe
# self.DSpaces.loc[i,'x'] = coords[0]
# self.DSpaces.loc[i,'y'] = coords[1]
self.Forbidden.loc[i,'label'] = label
#print(self.DSpaces)
#make label for each spot
for i in range(len(self.DSpaces)):
label = r' '.join([str(int(x)) for x in self.DSpaces.loc[i,['h','k','l']]]) #this is a bit dense, but makes a list of str() hkl values, then concatenates
#convert negative numbers to overline numbers for visual effect
for j,num in enumerate(self._overline_strings):
match = re.search(u'-%d' % (j+1),label)
if match:
label = re.sub(match.group(),num,label)
#add each label and coordinate to DSpace dataframe
# self.DSpaces.loc[i,'x'] = coords[0]
# self.DSpaces.loc[i,'y'] = coords[1]
label = r'$%s$' % label.replace(' ','\ ') #convert to mathtex string and add spaces
self.DSpaces.loc[i,'label'] = label
#add 000 spot
self.DSpaces.loc[len(self.DSpaces),['d-space','h','k','l','x','y','label']] = [0,0,0,0,0,0,r'$0\ 0\ 0$']
#print(self.DSpaces)
#scatterplots make it difficult to get data back in matplotlibwidget
# for i in range(len(self.DSpaces)):
self.Plot.plot(self.DSpaces['x'],self.DSpaces['y'],ls='',marker='o',markersize=10,color='k',picker=5)#,label='%i %i %i' % (self.DSpaces.loc[i,['h']],self.DSpaces.loc[i,['k']],self.DSpaces.loc[i,['l']]))
if showf:
self.Plot.plot(self.Forbidden['x'],self.Forbidden['y'],ls='',marker='o',markersize=7,color='gray', alpha=.7)
#xmax = max(self.DSpaces['x']); xmin = min(self.DSpaces['x'])
#ymax = max(self.DSpaces['y']); ymin = min(self.DSpaces['y'])
#self.Plot.set_xlim([1.5*xmin,1.5*xmax])
#self.Plot.set_ylim([1.5*ymin,1.5*ymax])
if self.checkBox_labels.isChecked() == True:
for i in range(len(self.DSpaces)):
#label = self.MathLabels(i)
label = self.DSpaces.loc[i,'label']
self.Plot.annotate(label, xy = (self.DSpaces.loc[i,'x'],self.DSpaces.loc[i,'y']), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01))
if showf:
for i in range(len(self.Forbidden)):
#label = self.MathLabels(i)
label = self.Forbidden.loc[i,'label']
self.Plot.annotate(label, xy = (self.Forbidden.loc[i,'x'],self.Forbidden.loc[i,'y']), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom',color='gray',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01))
if showf:
self.common.Forbidden = self.Forbidden
self.common.DSpaces = self.DSpaces
self.common.a = self.doubleSpinBox_a.value()#for determining arrow size in plot
self.DiffWidget.canvas.draw()
def MathLabels(self,i):
'''Make labels with overlines instead of minus signs for plotting with matplotlib.
i is the index for DSpaces'''
label = r''
if self.DSpaces.loc[i,'h'] < 0:
label+=r'$\bar %i$ ' % abs(self.DSpaces.loc[i,'h'])
else:
label+=r'%i ' % self.DSpaces.loc[i,'h']
if self.DSpaces.loc[i,'k'] < 0:
label+=r'$\bar %i$ ' % abs(self.DSpaces.loc[i,'k'])
else:
label+=r'%i ' % self.DSpaces.loc[i,'k']
if self.DSpaces.loc[i,'l'] < 0:
label+=r'$\bar %i$' % abs(self.DSpaces.loc[i,'l'])
else:
label+=r'%i' % self.DSpaces.loc[i,'l']
return(label)
################################################################################
############################### Calculator #####################################
################################################################################
def Calculator(self):
"""Grabs current miller indices or zone directions and calls AngleCalc"""
h1 = int(self.comboBox_h1.currentText())
h2 = int(self.comboBox_h2.currentText())
k1 = int(self.comboBox_k1.currentText())
k2 = int(self.comboBox_k2.currentText())
l1 = int(self.comboBox_l1.currentText())
l2 = int(self.comboBox_l2.currentText())
i1 = -(h1+k1)
i2 = -(h2+k2)
hex = self.checkBox_hexagonal.isChecked()
angle = round(np.degrees(self.Diffraction.PlaneAngle(p1=np.array([h1,k1,l1]),p2=np.array([h2,k2,l2]),hex=hex)),2)
if np.isnan(angle):
QtGui.QMessageBox.information(self, "Uh, Oh!", 'There is no [0 0 0] direction/plane!')
else:
if self.checkBox_normals.isChecked():
self.lineEdit_angle.setText(u'φ = %.2f°' % angle)
bra = u'('
ket = u')'
elif not self.checkBox_normals.isChecked():
self.lineEdit_angle.setText(u'ρ = %.2f°' % angle)
bra = u'['
ket = u']'
if hex == False:
hkls = [bra,h1,k1,l1,ket,bra,h2,k2,l2,ket]
for j,it in enumerate(hkls):
if type(it) == int and it < 0:
hkls[j] = self._overline_strings[abs(it)-1]
self.lineEdit_dirs.setText(u'%s%s%s%s%s \u2220 %s%s%s%s%s' % tuple(hkls))
else:
hkls = [bra,h1,k1,i1,l1,ket,bra,h2,k2,i2,l2,ket]
for j,it in enumerate(hkls):
if type(it) == int and it < 0:
hkls[j] = self._overline_strings[abs(it)-1]
self.lineEdit_dirs.setText(u'%s%s%s%s%s%s \u2220 %s%s%s%s%s%s' % tuple(hkls))
def CalcLabels(self):
"""Rewrite labels for aesthetics"""
if self.checkBox_normals.isChecked():
self.label_h2.setText(u'h')
self.label_k2.setText(u'k')
self.label_l2.setText(u'l')
#self.label_h2.setAlignment(0x0004)
#self.label_k2.setAlignment(0x0004)
#self.label_l2.setAlignment(0x0004)
elif not self.checkBox_normals.isChecked():
self.label_h2.setText(u'u')
self.label_k2.setText(u'v')
self.label_l2.setText(u'w')
#self.label_h2.setAlignment(0x0004)
#self.label_k2.setAlignment(0x0004)
#self.label_l2.setAlignment(0x0004)
################################################################################
############################### Other ##########################################
################################################################################
def About(self):
"""Displays the About message"""
QtGui.QMessageBox.information(self, "About",
"""pyLATTICE %s:
Written by Evan Groopman
Based upon LATTICE (DOS) by Thomas Bernatowicz
c. 2011-2014
For help contact: eegroopm@gmail.com""" % self.version)
def HowTo(self):
"""How-to dialog box"""
howtomessage = (
"""
- Select crystal type, unit cell type, and lattice parameters to calculate the metric tensor.
- OR select a mineral from the database.
- D-spacings will be calculated for the selected range of Miller indices.
- Select zone axis and press "Plot" to show diffraction pattern.
- Select two diffraction spots to measure distance and angle.
Note: pyLATTICE only includes general reflection conditions for each space group. It does not include special conditions based upon multiplicity, site symmetry, etc., EXCEPT for FCC diamond #227.
""")
QtGui.QMessageBox.information(self, "How to use",
howtomessage)
| gpl-2.0 |
najmacherrad/master_thesis | Waltz/plotcomparaisons_waltz.py | 1 | 7577 | # Waltz
# Compare results between wild type and mutant
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
from scipy import stats
from pylab import plot, show, savefig, xlim, figure, \
hold, ylim, legend, boxplot, setp, axes
import pylab
from numpy import *
def getColumn(filename, column,deli):
results = csv.reader(open(filename), delimiter=deli)
return [result[column] for result in results]
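# Illustrative usage (not part of the original script):
#   getColumn('waltzresults_wt.csv', 3, '\t')
# returns the 4th tab-separated field of every row, header line included, as strings.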
#import files
file_wt = 'waltzresults_wt.csv'
file_mut = 'waltzresults_mut.csv'
#------------------------------------
# AGGREGATION
#------------------------------------
#--------------------------------------
# SCATTER PLOT
pred_wt = getColumn(file_wt,3,'\t')
pred_mut = getColumn(file_mut,3,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
x,y=[],[]
for i in range(0,len(pred_wt)): #max=98.662207
if pred_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(pred_wt[i]))
for i in range(0,len(pred_mut)): #max=99.665552
if pred_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(pred_mut[i]))
fig = plt.figure()
a=b=[0,100]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(-1,101)
plt.ylim(-1,101)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('waltz_wtVSmut.jpg')
#----------------
# PROBABILITY DENSITY CURVE
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, 100, 100)
x2 = np.linspace(xmin2, 100, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'r',label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Aggregation conformation predicted values (amylogenic regions)')
plt.ylabel('Frequency')
plt.xlim(0,100)
#plt.ylim(0,0.0)
plt.legend(loc='upper right')
fig.savefig('histwaltz_missense.png')
#missense_wt - missense_mut
miss=[]
[miss.append(a_i - b_i) for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMIRNOV:
stats.kstest(miss,'norm') # (D,pvalue) = (0.3552063996073398, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) # (T, pvalue) = (4898.0, 0.29548245005836105)
#So we do not reject H0 -> There is no significant difference between wt and mut
#--------------------------------------
# AGGREGATION ENVIRONMENT
#--------------------------------------
#--------------------------------------
# SCATTER PLOT
pred_wt = getColumn(file_wt,4,'\t')
pred_mut = getColumn(file_mut,4,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
x,y=[],[]
for i in range(0,len(pred_wt)): #max=98.662207
if pred_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(pred_wt[i]))
for i in range(0,len(pred_mut)): #max=98.996656
if pred_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(pred_mut[i]))
fig = plt.figure()
a=b=[0,100]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(-1,101)
plt.ylim(-1,101)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('waltz_envt_wtVSmut.jpg')
#--------------------------------------
# HISTOGRAM
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, 100, 100)
x2 = np.linspace(xmin2, 100, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'r',label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Aggregation conformation predicted values (amyloidogenic regions)')
plt.ylabel('Frequency')
plt.xlim(0,100)
plt.ylim(0,0.06)
plt.legend(loc='upper right')
fig.savefig('histwaltzenvt_missense.png')
#missense_wt - missense_mut
miss = [a_i - b_i for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMIRNOV:
stats.kstest(miss,'norm') # (D,pvalue) = (0.34964202670995748, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) #-> (T, pvalue) = (8711.0, 0.55024961096028457)
#So we do not reject H0 -> There is no significant difference between wt and mut
#-----------------------------------------------------------------------------
# OUTLIERS FOR AGGREGATION ()
#-----------------------------------------------------------------------------
pred_wt = getColumn(file_wt,3,'\t')
pred_mut = getColumn(file_mut,3,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
pred_envt_wt = getColumn(file_wt,4,'\t')
pred_envt_mut = getColumn(file_mut,4,'\t')
pred_envt_wt.pop(0)
pred_envt_mut.pop(0)
variant_liste = getColumn(file_wt,0,'\t')
output = open('waltz_outliers.csv','w')
output.write('ID,agg_wt,agg_mut,difference,agg_envt_wt,agg_envt_mut,difference_envt\n')
for i in range(0, len(pred_wt)):
    if pred_wt[i] != 'NA' and pred_mut[i] != 'NA':
        if abs(float(pred_wt[i]) - float(pred_mut[i])) > 20:
            output.write(variant_liste[i+1] + ',' + pred_wt[i] + ',' + pred_mut[i] + ',' + str(abs(float(pred_wt[i])-float(pred_mut[i]))) + ',' + pred_envt_wt[i] + ',' + pred_envt_mut[i] + ',' + str(abs(float(pred_envt_wt[i])-float(pred_envt_mut[i]))) + '\n')
output.close()
#-------------------------------------------------------------------------------
#COMPARISON WITH NETSURFP RSA
#-------------------------------------------------------------------------------
W_wt = pd.read_csv(file_wt,'\t')
W_mut = pd.read_csv(file_mut,'\t')
W_wt['DWaltz'] = ''
W_wt['DWaltz'] = W_wt.aggregation - W_mut.aggregation
W_wt['DWaltz_envt'] = ''
W_wt['DWaltz_envt'] = W_wt.aggregation_envt - W_mut.aggregation_envt
W_wt = W_wt.drop(['aggregation','aggregation_envt'], 1)
W_wt.to_csv('waltzresults_compare.csv', index=False)
#RESIDUE
waltz = getColumn('waltzresults_compare.csv',3,',')
waltz.pop(0)
netsurfp = getColumn('netsurfpresults_compare.csv',3,',')
netsurfp.pop(0)
x,y=[],[]
for i in range(0,len(netsurfp)): #min=-0.183 and max=0.302
if netsurfp[i]=='':
x.append(np.nan)
else:
x.append(float(netsurfp[i]))
for i in range(0,len(waltz)): #min=-98.862207 and max=98.327759
if waltz[i]=='':
y.append(np.nan)
else:
y.append(float(waltz[i]))
fig = plt.figure()
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.grid('on')
plt.xlim(-0.4,0.4)
plt.ylim(-100,100)
plt.xlabel('delta(Solvent accessibility prediction) by NetSurfP')
plt.ylabel('delta(Aggregation conformation prediction) by Waltz')
fig.savefig('WaltzVSnetsurfp.jpg')
#ENVIRONMENT
waltz_envt = getColumn('waltzresults_compare.csv',4,',')
waltz_envt.pop(0)
netsurfp_envt = getColumn('netsurfpresults_compare.csv',4,',')
netsurfp_envt.pop(0)
x,y=[],[]
for i in range(0,len(netsurfp_envt)): #min=-0.183 and max=0.302
if netsurfp_envt[i]=='':
x.append(np.nan)
else:
x.append(float(netsurfp_envt[i]))
for i in range(0,len(waltz_envt)): #min=-98.862207 and max=98.327759
if waltz_envt[i]=='':
y.append(np.nan)
else:
y.append(float(waltz_envt[i]))
fig = plt.figure()
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.grid('on')
plt.xlim(-0.4,0.4)
plt.ylim(-100,100)
plt.xlabel('delta(Solvent accessibility prediction) by NetSurfP')
plt.ylabel('delta(Aggregation conformation prediction) by Waltz')
fig.savefig('WaltzVSnetsurfp_envt.jpg')
| mit |
satishgoda/bokeh | examples/plotting/file/unemployment.py | 46 | 1846 | from collections import OrderedDict
import numpy as np
from bokeh.plotting import ColumnDataSource, figure, show, output_file
from bokeh.models import HoverTool
from bokeh.sampledata.unemployment1948 import data
# Read in the data with pandas. Convert the year column to string
data['Year'] = [str(x) for x in data['Year']]
years = list(data['Year'])
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
data = data.set_index('Year')
# this is the colormap from the original plot
colors = [
"#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce",
"#ddb7b1", "#cc7878", "#933b41", "#550b1d"
]
# Set up the data for plotting. We will need to have values for every
# pair of year/month names. Map the rate to a color.
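# Note: int(monthly_rate) - 2 below indexes into the nine colours above, so a
# rate around 2% maps to the first colour and rates of 10% or more are capped
# at the last one.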
month = []
year = []
color = []
rate = []
for y in years:
for m in months:
month.append(m)
year.append(y)
monthly_rate = data[m][y]
rate.append(monthly_rate)
color.append(colors[min(int(monthly_rate)-2, 8)])
source = ColumnDataSource(
data=dict(month=month, year=year, color=color, rate=rate)
)
output_file('unemployment.html')
TOOLS = "resize,hover,save,pan,box_zoom,wheel_zoom"
p = figure(title="US Unemployment (1948 - 2013)",
x_range=years, y_range=list(reversed(months)),
x_axis_location="above", plot_width=900, plot_height=400,
toolbar_location="left", tools=TOOLS)
p.rect("year", "month", 1, 1, source=source,
color="color", line_color=None)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi/3
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
('date', '@month @year'),
('rate', '@rate'),
])
show(p) # show the plot
| bsd-3-clause |
tyler-abbot/psid_py | setup.py | 1 | 2486 | """A setup module for psidPy
Based on the pypa sample project.
A tool to download data and build psid panels based on psidR by Florian Oswald.
See:
https://github.com/floswald/psidR
https://github.com/tyler-abbot/psidPy
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='psid_py',
version='1.0.2',
description='A tool to build PSID panels.',
# The project's main homepage
url='https://github.com/tyler-abbot/psidPy',
# Author details
author='Tyler Abbot',
author_email='tyler.abbot@sciencespo.fr',
# Licensing information
license='MIT',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Information Analysis',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
# What does your project relate to?
keywords='statistics econometrics data',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests',
'pandas',
'beautifulsoup4'],
)
| mit |
gfyoung/pandas | pandas/tests/indexes/common.py | 2 | 28221 | import gc
from typing import Type
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
from pandas.core.dtypes.common import is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Index,
Int64Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
isna,
)
import pandas._testing as tm
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
class Base:
""" base class for index sub-class tests """
_holder: Type[Index]
_compat_props = ["shape", "ndim", "size", "nbytes"]
def create_index(self) -> Index:
raise NotImplementedError("Method not implemented")
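    # A concrete subclass only needs to fill in ``_holder`` and ``create_index``.
    # Hypothetical sketch (not one of the real test classes):
    #
    #     class TestMyIndex(Base):
    #         _holder = Int64Index
    #
    #         def create_index(self) -> Index:
    #             return Int64Index(np.arange(5))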
def test_pickle_compat_construction(self):
# need an object to create with
msg = (
r"Index\(\.\.\.\) must be called with a collection of some "
r"kind, None was passed|"
r"__new__\(\) missing 1 required positional argument: 'data'|"
r"__new__\(\) takes at least 2 arguments \(1 given\)"
)
with pytest.raises(TypeError, match=msg):
self._holder()
@pytest.mark.parametrize("name", [None, "new_name"])
def test_to_frame(self, name):
# see GH-15230, GH-22580
idx = self.create_index()
if name:
idx_name = name
else:
idx_name = idx.name or 0
df = idx.to_frame(name=idx_name)
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == idx_name
assert df[idx_name].values is not idx.values
df = idx.to_frame(index=False, name=idx_name)
assert df.index is not idx
def test_shift(self):
# GH8083 test the base class for shift
idx = self.create_index()
msg = (
f"This method is only implemented for DatetimeIndex, PeriodIndex and "
f"TimedeltaIndex; Got type {type(idx).__name__}"
)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_constructor_name_unhashable(self):
# GH#29069 check that name is hashable
# See also same-named test in tests.series.test_constructors
idx = self.create_index()
with pytest.raises(TypeError, match="Index.name must be a hashable type"):
type(idx)(idx, name=[])
def test_create_index_existing_name(self):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = self.create_index()
if not isinstance(expected, MultiIndex):
expected.name = "foo"
result = Index(expected)
tm.assert_index_equal(result, expected)
result = Index(expected, name="bar")
expected.name = "bar"
tm.assert_index_equal(result, expected)
else:
expected.names = ["foo", "bar"]
result = Index(expected)
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["foo", "bar"],
),
)
result = Index(expected, names=["A", "B"])
tm.assert_index_equal(
result,
Index(
Index(
[
("foo", "one"),
("foo", "two"),
("bar", "one"),
("baz", "two"),
("qux", "one"),
("qux", "two"),
],
dtype="object",
),
names=["A", "B"],
),
)
def test_numeric_compat(self):
idx = self.create_index()
# Check that this doesn't cover MultiIndex case, if/when it does,
# we can remove multi.test_compat.test_numeric_compat
assert not isinstance(idx, MultiIndex)
if type(idx) is Index:
return
typ = type(idx._data).__name__
lmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: '{typ}' and 'int'",
"cannot perform (__mul__|__truediv__|__floordiv__) with "
f"this index type: {typ}",
]
)
with pytest.raises(TypeError, match=lmsg):
idx * 1
rmsg = "|".join(
[
rf"unsupported operand type\(s\) for \*: 'int' and '{typ}'",
"cannot perform (__rmul__|__rtruediv__|__rfloordiv__) with "
f"this index type: {typ}",
]
)
with pytest.raises(TypeError, match=rmsg):
1 * idx
div_err = lmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = rmsg.replace("*", "/")
with pytest.raises(TypeError, match=div_err):
1 / idx
floordiv_err = lmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
idx // 1
floordiv_err = rmsg.replace("*", "//")
with pytest.raises(TypeError, match=floordiv_err):
1 // idx
def test_logical_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match="cannot perform all"):
idx.all()
with pytest.raises(TypeError, match="cannot perform any"):
idx.any()
def test_reindex_base(self):
idx = self.create_index()
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with pytest.raises(ValueError, match="Invalid fill method"):
idx.get_indexer(idx, method="invalid")
def test_get_indexer_consistency(self, index):
# See GH 16819
if isinstance(index, IntervalIndex):
# requires index.is_non_overlapping
return
if index.is_unique:
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
e = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=e):
index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
def test_ndarray_compat_properties(self):
idx = self.create_index()
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
assert getattr(idx, prop) == getattr(values, prop)
# test for validity
idx.nbytes
idx.values.nbytes
def test_repr_roundtrip(self):
idx = self.create_index()
tm.assert_index_equal(eval(repr(idx)), idx)
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
def test_copy_name(self, index):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
if isinstance(index, MultiIndex):
return
first = type(index)(index, copy=True, name="mario")
second = type(first)(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == "mario"
assert second.name == "mario"
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == "mario"
def test_copy_name2(self, index):
# gh-35592
if isinstance(index, MultiIndex):
return
assert index.copy(name="mario").name == "mario"
with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):
index.copy(name=["mario", "luigi"])
msg = f"{type(index).__name__}.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
index.copy(name=[["mario"]])
def test_copy_dtype_deprecated(self, index):
# GH35853
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
index.copy(dtype=object)
def test_ensure_copied_data(self, index):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs["freq"] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
return
index_type = type(index)
result = index_type(index.values, copy=True, **init_kwargs)
if is_datetime64tz_dtype(index.dtype):
result = result.tz_localize("UTC").tz_convert(index.tz)
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
index = index._with_freq(None)
tm.assert_index_equal(index, result)
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.asi8, result.asi8, check_same="same")
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values, check_same="same")
def test_memory_usage(self, index):
index._engine.clear_mapping()
result = index.memory_usage()
if index.empty:
# we report 0 for no-length
assert result == 0
return
# non-zero length
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == "object":
assert result3 > result2
def test_argsort(self, request, index):
# separately tested
if isinstance(index, CategoricalIndex):
return
result = index.argsort()
expected = np.array(index).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self, index):
result = np.argsort(index)
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
if isinstance(type(index), (CategoricalIndex, RangeIndex)):
# TODO: why type(index)?
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, kind="mergesort")
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(index, order=("a", "b"))
def test_repeat(self):
rep = 2
i = self.create_index()
expected = Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
i = self.create_index()
rep = np.arange(len(i))
expected = Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
def test_numpy_repeat(self):
rep = 2
i = self.create_index()
expected = i.repeat(rep)
tm.assert_index_equal(np.repeat(i, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(i, rep, axis=0)
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
if isinstance(i, (pd.DatetimeIndex, pd.TimedeltaIndex)):
# where does not preserve freq
i = i._with_freq(None)
cond = [True] * len(i)
result = i.where(klass(cond))
expected = i
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(i[1:])
expected = Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_insert_base(self, index):
result = index[1:4]
if not len(index):
return
# test 0th element
assert index[0:4].equals(result.insert(0, index[0]))
def test_delete_base(self, index):
if not len(index):
return
if isinstance(index, RangeIndex):
# tested in class
return
expected = index[1:]
result = index.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = index[:-1]
result = index.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
length = len(index)
msg = f"index {length} is out of bounds for axis 0 with size {length}"
with pytest.raises(IndexError, match=msg):
index.delete(length)
def test_equals(self, index):
if isinstance(index, IntervalIndex):
# IntervalIndex tested separately, the index.equals(index.astype(object))
# fails for IntervalIndex
return
assert index.equals(index)
assert index.equals(index.copy())
assert index.equals(index.astype(object))
assert not index.equals(list(index))
assert not index.equals(np.array(index))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(index, RangeIndex):
same_values = Index(index, dtype=object)
assert index.equals(same_values)
assert same_values.equals(index)
if index.nlevels == 1:
# do not test MultiIndex
assert not index.equals(Series(index))
def test_equals_op(self):
# GH9947, GH10637
index_a = self.create_index()
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
msg = "Lengths must match|could not be broadcast"
with pytest.raises(ValueError, match=msg):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with pytest.raises(ValueError, match=msg):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with pytest.raises(ValueError, match=msg):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with pytest.raises(ValueError, match="Lengths must match"):
index_a == index_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
series_a == series_d
with pytest.raises(ValueError, match="Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
# For RangeIndex we can convert to Int64Index
tm.assert_series_equal(series_a == item, Series(expected3))
def test_format(self):
# GH35439
idx = self.create_index()
expected = [str(x) for x in idx]
assert idx.format() == expected
def test_format_empty(self):
# GH35712
empty_idx = self._holder([])
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
def test_hasnans_isnans(self, index):
# GH 11343, added tests for hasnans / isnans
if isinstance(index, MultiIndex):
return
# cases in indices doesn't include NaN
idx = index.copy(deep=True)
expected = np.array([False] * len(idx), dtype=bool)
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is False
idx = index.copy(deep=True)
values = np.asarray(idx.values)
if len(index) == 0:
return
elif isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
return
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = type(index)(values, freq=index.freq)
else:
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_fillna(self, index):
# GH 11343
if len(index) == 0:
pass
elif isinstance(index, MultiIndex):
idx = index.copy(deep=True)
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.fillna(idx[0])
else:
idx = index.copy(deep=True)
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with pytest.raises(TypeError, match=msg):
idx.fillna([idx[0]])
idx = index.copy(deep=True)
values = np.asarray(idx.values)
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
return
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = type(index)(values, freq=index.freq)
else:
idx = type(index)(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans is True
def test_nulls(self, index):
# this is really a smoke test for the methods
# as these are adequately tested for function elsewhere
if len(index) == 0:
tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool))
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.isna()
elif not index.hasnans:
tm.assert_numpy_array_equal(index.isna(), np.zeros(len(index), dtype=bool))
tm.assert_numpy_array_equal(index.notna(), np.ones(len(index), dtype=bool))
else:
result = isna(index)
tm.assert_numpy_array_equal(index.isna(), result)
tm.assert_numpy_array_equal(index.notna(), ~result)
def test_empty(self):
# GH 15270
index = self.create_index()
assert not index.empty
assert index[:0].empty
def test_join_self_unique(self, join_type):
index = self.create_index()
if index.is_unique:
joined = index.join(index, how=join_type)
assert (index == joined).all()
def test_map(self):
# callable
index = self.create_index()
# we don't infer UInt64
if isinstance(index, pd.UInt64Index):
expected = index.astype("int64")
else:
expected = index
result = index.map(lambda x: x)
# For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: Series(values, index),
],
)
def test_map_dictlike(self, mapper):
index = self.create_index()
if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)):
pytest.skip(f"skipping tests for {type(index)}")
identity = mapper(index.values, index)
# we don't infer to UInt64 for a dict
if isinstance(index, pd.UInt64Index) and isinstance(identity, dict):
expected = index.astype("int64")
else:
expected = index
result = index.map(identity)
# For RangeIndex we convert to Int64Index
tm.assert_index_equal(result, expected)
# empty mappable
expected = Index([np.nan] * len(index))
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
def test_map_str(self):
# GH 31202
index = self.create_index()
result = index.map(str)
expected = Index([str(x) for x in index], dtype=object)
tm.assert_index_equal(result, expected)
def test_putmask_with_wrong_mask(self):
# GH18368
index = self.create_index()
fill = index[0]
msg = "putmask: mask and data must be the same size"
with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) + 1, np.bool_), fill)
with pytest.raises(ValueError, match=msg):
index.putmask(np.ones(len(index) - 1, np.bool_), fill)
with pytest.raises(ValueError, match=msg):
index.putmask("foo", fill)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("ordered", [True, False])
def test_astype_category(self, copy, name, ordered):
# GH 18630
index = self.create_index()
if name:
index = index.rename(name)
# standard categories
dtype = CategoricalDtype(ordered=ordered)
result = index.astype(dtype, copy=copy)
expected = CategoricalIndex(index.values, name=name, ordered=ordered)
tm.assert_index_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered)
result = index.astype(dtype, copy=copy)
expected = CategoricalIndex(index.values, name=name, dtype=dtype)
tm.assert_index_equal(result, expected)
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
result = index.astype("category", copy=copy)
expected = CategoricalIndex(index.values, name=name)
tm.assert_index_equal(result, expected)
def test_is_unique(self):
# initialize a unique index
index = self.create_index().drop_duplicates()
assert index.is_unique is True
# empty index should be unique
index_empty = index[:0]
assert index_empty.is_unique is True
# test basic dupes
index_dup = index.insert(0, index[0])
assert index_dup.is_unique is False
# single NA should be unique
index_na = index.insert(0, np.nan)
assert index_na.is_unique is True
# multiple NA should not be unique
index_na_dup = index_na.insert(0, np.nan)
assert index_na_dup.is_unique is False
@pytest.mark.arm_slow
def test_engine_reference_cycle(self):
# GH27585
index = self.create_index()
nrefs_pre = len(gc.get_referrers(index))
index._engine
assert len(gc.get_referrers(index)) == nrefs_pre
def test_getitem_2d_deprecated(self):
# GH#30588
idx = self.create_index()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
res = idx[:, None]
assert isinstance(res, np.ndarray), type(res)
def test_contains_requires_hashable_raises(self):
idx = self.create_index()
msg = "unhashable type: 'list'"
with pytest.raises(TypeError, match=msg):
[] in idx
msg = "|".join(
[
r"unhashable type: 'dict'",
r"must be real number, not dict",
r"an integer is required",
r"\{\}",
r"pandas\._libs\.interval\.IntervalTree' is not iterable",
]
)
with pytest.raises(TypeError, match=msg):
{} in idx._engine
def test_copy_shares_cache(self):
# GH32898, GH36840
idx = self.create_index()
idx.get_loc(idx[0]) # populates the _cache.
copy = idx.copy()
assert copy._cache is idx._cache
def test_shallow_copy_shares_cache(self):
# GH32669, GH36840
idx = self.create_index()
idx.get_loc(idx[0]) # populates the _cache.
shallow_copy = idx._shallow_copy()
assert shallow_copy._cache is idx._cache
shallow_copy = idx._shallow_copy(idx._data)
assert shallow_copy._cache is not idx._cache
assert shallow_copy._cache == {}
| bsd-3-clause |
helloworldajou/webserver | demos/classifier_webcam.py | 4 | 7059 | #!/usr/bin/env python2
#
# Example to run classifier on webcam stream.
# Brandon Amos & Vijayenthiran
# 2016/06/21
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Contrib: Vijayenthiran
# This example file shows to run a classifier on webcam stream. You need to
# run the classifier.py to generate classifier with your own dataset.
# To run this file from the openface home dir:
# ./demo/classifier_webcam.py <path-to-your-classifier>
import time
start = time.time()
import argparse
import cv2
import os
import pickle
import sys
import numpy as np
np.set_printoptions(precision=2)
from sklearn.mixture import GMM
import openface
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def getRep(bgrImg):
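    # Computes one OpenFace embedding (via net.forward) for every face that is
    # detected and aligned in the given BGR frame.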
start = time.time()
if bgrImg is None:
raise Exception("Unable to load image/frame")
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if args.verbose:
print(" + Original size: {}".format(rgbImg.shape))
if args.verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
# Get the largest face bounding box
# bb = align.getLargestFaceBoundingBox(rgbImg) #Bounding box
# Get all bounding boxes
bb = align.getAllFaceBoundingBoxes(rgbImg)
if bb is None:
# raise Exception("Unable to find a face: {}".format(imgPath))
return None
if args.verbose:
print("Face detection took {} seconds.".format(time.time() - start))
start = time.time()
alignedFaces = []
for box in bb:
alignedFaces.append(
align.align(
args.imgDim,
rgbImg,
box,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE))
if alignedFaces is None:
raise Exception("Unable to align the frame")
if args.verbose:
print("Alignment took {} seconds.".format(time.time() - start))
start = time.time()
reps = []
for alignedFace in alignedFaces:
reps.append(net.forward(alignedFace))
if args.verbose:
print("Neural network forward pass took {} seconds.".format(
time.time() - start))
# print (reps)
return reps
def infer(img, args):
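    # Loads the pickled (label encoder, classifier) pair and returns one
    # predicted person and confidence per face found in the frame.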
    with open(args.classifierModel, 'rb') as f:  # binary mode so the pickle loads under Python 2 and 3
if sys.version_info[0] < 3:
            (le, clf) = pickle.load(f)  # le - label encoder and clf - classifier
else:
            (le, clf) = pickle.load(f, encoding='latin1')  # le - label encoder and clf - classifier
reps = getRep(img)
persons = []
confidences = []
for rep in reps:
try:
rep = rep.reshape(1, -1)
except:
print ("No Face detected")
return (None, None)
start = time.time()
predictions = clf.predict_proba(rep).ravel()
# print (predictions)
maxI = np.argmax(predictions)
# max2 = np.argsort(predictions)[-3:][::-1][1]
persons.append(le.inverse_transform(maxI))
# print (str(le.inverse_transform(max2)) + ": "+str( predictions [max2]))
# ^ prints the second prediction
confidences.append(predictions[maxI])
if args.verbose:
print("Prediction took {} seconds.".format(time.time() - start))
pass
# print("Predict {} with {:.2f} confidence.".format(person.decode('utf-8'), confidence))
if isinstance(clf, GMM):
dist = np.linalg.norm(rep - clf.means_[maxI])
print(" + Distance from the mean: {}".format(dist))
pass
return (persons, confidences)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dlibFacePredictor',
type=str,
help="Path to dlib's face predictor.",
default=os.path.join(
dlibModelDir,
"shape_predictor_68_face_landmarks.dat"))
parser.add_argument(
'--networkModel',
type=str,
help="Path to Torch network model.",
default=os.path.join(
openfaceModelDir,
'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument(
'--captureDevice',
type=int,
default=0,
        help='Capture device. 0 for laptop webcam and 1 for usb webcam')
parser.add_argument('--width', type=int, default=320)
parser.add_argument('--height', type=int, default=240)
parser.add_argument('--threshold', type=float, default=0.5)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument(
'classifierModel',
type=str,
help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')
args = parser.parse_args()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(
args.networkModel,
imgDim=args.imgDim,
cuda=args.cuda)
# Capture device. Usually 0 will be webcam and 1 will be usb cam.
video_capture = cv2.VideoCapture(args.captureDevice)
video_capture.set(3, args.width)
video_capture.set(4, args.height)
confidenceList = []
while True:
ret, frame = video_capture.read()
persons, confidences = infer(frame, args)
print ("P: " + str(persons) + " C: " + str(confidences))
try:
# append with two floating point precision
confidenceList.append('%.2f' % confidences[0])
except:
# If there is no face detected, confidences matrix will be empty.
# We can simply ignore it.
pass
for i, c in enumerate(confidences):
if c <= args.threshold: # 0.5 is kept as threshold for known face.
persons[i] = "_unknown"
# Print the person name and conf value on the frame
cv2.putText(frame, "P: {} C: {}".format(persons, confidences),
(50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.imshow('', frame)
# quit the program on the press of key 'q'
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
| apache-2.0 |
roxyboy/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
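    # The unnormalised log-values log(2 * v) should normalise back to v,
    # which already sums to 1.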
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
pp-mo/iris | lib/iris/quickplot.py | 2 | 9074 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
High-level plotting extensions to :mod:`iris.plot`.
These routines work much like their :mod:`iris.plot` counterparts, but they
automatically add a plot title, axis titles, and a colour bar when appropriate.
See also: :ref:`matplotlib <matplotlib:users-guide-index>`.
"""
import cf_units
import matplotlib.pyplot as plt
import iris.config
import iris.coords
import iris.plot as iplt
def _use_symbol(units):
# For non-time units use the shortest unit representation.
# E.g. prefer 'K' over 'kelvin', but not '0.0174532925199433 rad'
# over 'degrees'
return (
not units.is_time()
and not units.is_time_reference()
and len(units.symbol) < len(str(units))
)
def _title(cube_or_coord, with_units):
if cube_or_coord is None or isinstance(cube_or_coord, int):
title = ""
else:
title = cube_or_coord.name().replace("_", " ").capitalize()
units = cube_or_coord.units
if with_units and not (
units.is_unknown()
or units.is_no_unit()
or units == cf_units.Unit("1")
):
if _use_symbol(units):
units = units.symbol
title += " / {}".format(units)
return title
def _label(cube, mode, result=None, ndims=2, coords=None, axes=None):
"""Puts labels on the current plot using the given cube."""
if axes is None:
axes = plt.gca()
axes.set_title(_title(cube, with_units=False))
if result is not None:
draw_edges = mode == iris.coords.POINT_MODE
bar = plt.colorbar(
result, orientation="horizontal", drawedges=draw_edges
)
has_known_units = not (
cube.units.is_unknown() or cube.units.is_no_unit()
)
if has_known_units and cube.units != cf_units.Unit("1"):
# Use shortest unit representation for anything other than time
if _use_symbol(cube.units):
bar.set_label(cube.units.symbol)
else:
bar.set_label(cube.units)
# Remove the tick which is put on the colorbar by default.
bar.ax.tick_params(length=0)
if coords is None:
plot_defn = iplt._get_plot_defn(cube, mode, ndims)
else:
plot_defn = iplt._get_plot_defn_custom_coords_picked(
cube, coords, mode, ndims=ndims
)
if ndims == 2:
if not iplt._can_draw_map(plot_defn.coords):
axes.set_ylabel(_title(plot_defn.coords[0], with_units=True))
axes.set_xlabel(_title(plot_defn.coords[1], with_units=True))
elif ndims == 1:
axes.set_xlabel(_title(plot_defn.coords[0], with_units=True))
axes.set_ylabel(_title(cube, with_units=True))
else:
msg = (
"Unexpected number of dimensions ({}) given to "
"_label.".format(ndims)
)
raise ValueError(msg)
def _label_with_bounds(cube, result=None, ndims=2, coords=None, axes=None):
_label(cube, iris.coords.BOUND_MODE, result, ndims, coords, axes)
def _label_with_points(cube, result=None, ndims=2, coords=None, axes=None):
_label(cube, iris.coords.POINT_MODE, result, ndims, coords, axes)
def _get_titles(u_object, v_object):
if u_object is None:
u_object = iplt._u_object_from_v_object(v_object)
xunits = u_object is not None and not u_object.units.is_time_reference()
yunits = not v_object.units.is_time_reference()
xlabel = _title(u_object, with_units=xunits)
ylabel = _title(v_object, with_units=yunits)
title = ""
if u_object is None:
title = _title(v_object, with_units=False)
elif isinstance(u_object, iris.cube.Cube) and not isinstance(
v_object, iris.cube.Cube
):
title = _title(u_object, with_units=False)
elif isinstance(v_object, iris.cube.Cube) and not isinstance(
u_object, iris.cube.Cube
):
title = _title(v_object, with_units=False)
return xlabel, ylabel, title
def _label_1d_plot(*args, **kwargs):
if len(args) > 1 and isinstance(
args[1], (iris.cube.Cube, iris.coords.Coord)
):
xlabel, ylabel, title = _get_titles(*args[:2])
else:
xlabel, ylabel, title = _get_titles(None, args[0])
axes = kwargs.pop("axes", None)
if len(kwargs) != 0:
msg = "Unexpected kwargs {} given to _label_1d_plot".format(
kwargs.keys()
)
raise ValueError(msg)
if axes is None:
axes = plt.gca()
axes.set_title(title)
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
def contour(cube, *args, **kwargs):
"""
Draws contour lines on a labelled plot based on the given Cube.
With the basic call signature, contour "level" values are chosen
automatically::
contour(cube)
Supply a number to use *N* automatically chosen levels::
contour(cube, N)
Supply a sequence *V* to use explicitly defined levels::
contour(cube, V)
See :func:`iris.plot.contour` for details of valid keyword arguments.
"""
coords = kwargs.get("coords")
axes = kwargs.get("axes")
result = iplt.contour(cube, *args, **kwargs)
_label_with_points(cube, coords=coords, axes=axes)
return result
def contourf(cube, *args, **kwargs):
"""
Draws filled contours on a labelled plot based on the given Cube.
With the basic call signature, contour "level" values are chosen
automatically::
contour(cube)
Supply a number to use *N* automatically chosen levels::
contour(cube, N)
Supply a sequence *V* to use explicitly defined levels::
contour(cube, V)
See :func:`iris.plot.contourf` for details of valid keyword arguments.
"""
coords = kwargs.get("coords")
axes = kwargs.get("axes")
result = iplt.contourf(cube, *args, **kwargs)
_label_with_points(cube, result, coords=coords, axes=axes)
return result
def outline(cube, coords=None, color="k", linewidth=None, axes=None):
"""
Draws cell outlines on a labelled plot based on the given Cube.
Kwargs:
* coords: list of :class:`~iris.coords.Coord` objects or coordinate names
Use the given coordinates as the axes for the plot. The order of the
given coordinates indicates which axis to use for each, where the first
element is the horizontal axis of the plot and the second element is
the vertical axis of the plot.
* color: None or mpl color
The color of the cell outlines. If None, the matplotlibrc setting
patch.edgecolor is used by default.
* linewidth: None or number
The width of the lines showing the cell outlines. If None, the default
width in patch.linewidth in matplotlibrc is used.
"""
result = iplt.outline(
cube, color=color, linewidth=linewidth, coords=coords, axes=axes
)
_label_with_bounds(cube, coords=coords, axes=axes)
return result
def pcolor(cube, *args, **kwargs):
"""
Draws a labelled pseudocolor plot based on the given Cube.
See :func:`iris.plot.pcolor` for details of valid keyword arguments.
"""
coords = kwargs.get("coords")
axes = kwargs.get("axes")
result = iplt.pcolor(cube, *args, **kwargs)
_label_with_bounds(cube, result, coords=coords, axes=axes)
return result
def pcolormesh(cube, *args, **kwargs):
"""
Draws a labelled pseudocolour plot based on the given Cube.
See :func:`iris.plot.pcolormesh` for details of valid keyword arguments.
"""
coords = kwargs.get("coords")
axes = kwargs.get("axes")
result = iplt.pcolormesh(cube, *args, **kwargs)
_label_with_bounds(cube, result, coords=coords, axes=axes)
return result
def points(cube, *args, **kwargs):
"""
Draws sample point positions on a labelled plot based on the given Cube.
See :func:`iris.plot.points` for details of valid keyword arguments.
"""
coords = kwargs.get("coords")
axes = kwargs.get("axes")
result = iplt.points(cube, *args, **kwargs)
_label_with_points(cube, coords=coords, axes=axes)
return result
def plot(*args, **kwargs):
"""
Draws a labelled line plot based on the given cube(s) or
coordinate(s).
See :func:`iris.plot.plot` for details of valid arguments and
keyword arguments.
"""
axes = kwargs.get("axes")
result = iplt.plot(*args, **kwargs)
_label_1d_plot(*args, axes=axes)
return result
def scatter(x, y, *args, **kwargs):
"""
Draws a labelled scatter plot based on the given cubes or
coordinates.
See :func:`iris.plot.scatter` for details of valid arguments and
keyword arguments.
"""
axes = kwargs.get("axes")
result = iplt.scatter(x, y, *args, **kwargs)
_label_1d_plot(x, y, axes=axes)
return result
# Provide a convenience show method from pyplot.
show = plt.show
| lgpl-3.0 |
igabriel85/dmon-adp | misc/keras_test.py | 1 | 1530 | import numpy
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
import os, sys
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
dataDir = os.path.join(os.path.dirname(os.path.abspath('')), 'data')
# load dataset
dataframe = pd.read_csv(os.path.join(dataDir, 'iris.csv'))
dataset = dataframe.values
X = dataset[:,0:4].astype(float)
Y = dataset[:,4]
# print Y
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_Y)
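# e.g. the class index 2 becomes the one-hot row [0., 0., 1.] for the three iris classes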
# print dummy_y
# define baseline model
def baseline_model():
# create model
model = Sequential()
model.add(Dense(8, input_dim=4, activation='relu'))
model.add(Dense(3, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
estimator = KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=20, verbose=1)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100)) | apache-2.0 |
MalkIPP/ipp_work | ipp_work/simulations/ir_marg_rate.py | 1 | 8481 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas
import logging
import openfisca_france_data
from openfisca_france_data.input_data_builders import get_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
from openfisca_core.rates import average_rate
from ipp_work.utils import from_simulation_to_data_frame_by_entity_key_plural
log = logging.getLogger(__name__)
def from_input_df_to_entity_key_plural_df(input_data_frame, tax_benefit_system, simulation, used_as_input_variables = None):
'''
    Required inputs:
        an input_data_frame
        a list of the needed variables and their entities => the tax_benefit_system is required
    Goal: build an input_data_frame_by_entity_key_plural
    A second function is then needed to turn this data frame into an Array
'''
assert input_data_frame is not None
assert tax_benefit_system is not None
id_variables = [
entity.index_for_person_variable_name for entity in simulation.entity_by_key_singular.values()
if not entity.is_persons_entity]
role_variables = [
entity.role_for_person_variable_name for entity in simulation.entity_by_key_singular.values()
if not entity.is_persons_entity]
column_by_name = tax_benefit_system.column_by_name
    # Check 1 (here or in the class method?)
for column_name in input_data_frame:
if column_name not in column_by_name:
log.info('Unknown column "{}" in survey, dropped from input table'.format(column_name))
# waiting for the new pandas version to hit Travis repo
input_data_frame = input_data_frame.drop(column_name, axis = 1)
# , inplace = True) # TODO: effet de bords ?
    # Check 2 (here or in the class method?)
for column_name in input_data_frame:
if column_name in id_variables + role_variables:
continue
#TODO: make that work ? (MG, may 15)
# if column_by_name[column_name].formula_class.function is not None:
# if column_name in column_by_name.used_as_input_variables:
# log.info(
# 'Column "{}" not dropped because present in used_as_input_variabels'.format(column_name))
# continue
#
# log.info('Column "{}" in survey set to be calculated, dropped from input table'.format(column_name))
# input_data_frame = input_data_frame.drop(column_name, axis = 1)
# , inplace = True) # TODO: effet de bords ?
# Work on entities
for entity in simulation.entity_by_key_singular.values():
if entity.is_persons_entity:
entity.count = entity.step_size = len(input_data_frame)
else:
entity.count = entity.step_size = (input_data_frame[entity.role_for_person_variable_name] == 0).sum()
entity.roles_count = input_data_frame[entity.role_for_person_variable_name].max() + 1
# Classify column by entity:
columns_by_entity = {}
columns_by_entity['individu'] = []
columns_by_entity['quifam'] = []
columns_by_entity['quifoy'] = []
columns_by_entity['quimen'] = []
for column_name, column_serie in input_data_frame.iteritems():
holder = simulation.get_or_new_holder(column_name)
entity = holder.entity
if entity.is_persons_entity:
columns_by_entity['individu'].append(column_name)
else:
columns_by_entity[entity.role_for_person_variable_name].append(column_name)
input_data_frame_by_entity_key_plural = {}
for entity in simulation.entity_by_key_singular.values():
if entity.is_persons_entity:
input_data_frame_by_entity_key_plural['individus'] = \
input_data_frame[columns_by_entity['individu']]
entity.count = entity.step_size = len(input_data_frame)
else:
input_data_frame_by_entity_key_plural[entity.index_for_person_variable_name] = \
input_data_frame[columns_by_entity[entity.role_for_person_variable_name]][input_data_frame[entity.role_for_person_variable_name] == 0]
return input_data_frame_by_entity_key_plural
def marginal_rate_survey(df, target = None, target_2 = None, varying = None, varying_2 = None):
# target: numerator, varying: denominator
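    # i.e. 1 - (change in target between the two runs) / (change in varying),
    # computed row by row on the merged data frame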
return 1 - (df[target] - df[target_2]) / (df[varying] - df[varying_2])
def varying_survey_simulation(year = 2009, increment = 10, target = 'irpp', varying = 'rni', used_as_input_variables = None):
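    # Overview: run a baseline simulation to get (target, varying), add `increment`
    # to `varying` for every foyer, run a second simulation, then derive marginal
    # and average rates from the two sets of results.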
TaxBenefitSystem = openfisca_france_data.init_country()
tax_benefit_system = TaxBenefitSystem()
input_data_frame = get_input_data_frame(year)
# Simulation 1 : get varying and target
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
used_as_input_variables = used_as_input_variables,
year = year,
tax_benefit_system = tax_benefit_system
)
simulation = survey_scenario.new_simulation(debug = False)
output_data_frame = pandas.DataFrame(
dict([(name, simulation.calculate_add(name)) for name in [
target, varying, 'idfoy_original'
]]))
# Make input_data_frame_by_entity_key_plural from the previous input_data_frame and simulation
input_data_frames_by_entity_key_plural = \
from_input_df_to_entity_key_plural_df(input_data_frame, tax_benefit_system, simulation)
foyers = input_data_frames_by_entity_key_plural['idfoy']
foyers = pandas.merge(foyers, output_data_frame, on = 'idfoy_original')
    # Increment the varying variable:
foyers[varying] = foyers[varying] + increment
    # Put the updated data frame back into the dictionary
input_data_frames_by_entity_key_plural['idfoy'] = foyers
    # Second simulation, built from input_data_frame_by_entity_key_plural:
    # TODO: fix used_as_input_variables in the from_input_df_to_entity_key_plural_df() function
used_as_input_variables = used_as_input_variables + [varying]
TaxBenefitSystem = openfisca_france_data.init_country()
tax_benefit_system = TaxBenefitSystem()
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = None,
input_data_frames_by_entity_key_plural = input_data_frames_by_entity_key_plural,
used_as_input_variables = used_as_input_variables,
year = year,
tax_benefit_system = tax_benefit_system,
)
simulation = survey_scenario.new_simulation(debug = False)
output_data_frame2 = pandas.DataFrame(
dict([(name, simulation.calculate_add(name)) for name in [
target, varying, 'idfoy_original'
]]))
output_data_frame2.rename(columns = {varying: '{}_2'.format(varying),
target: '{}_2'.format(target)}, inplace = True)
merged = pandas.merge(output_data_frame, output_data_frame2, on = 'idfoy_original')
merged['marginal_rate'] = marginal_rate_survey(merged, '{}'.format(target), '{}_2'.format(target), 'rni', 'rni_2')
merged['average_rate'] = average_rate(target = merged[target], varying = merged[varying])
return merged
if __name__ == '__main__':
import logging
import time
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
start = time.time()
used_as_input_variables = ['salaire_imposable', 'cho', 'rst', 'age_en_mois', 'smic55']
merged = varying_survey_simulation(year = 2009, increment = 10, target = 'irpp', varying = 'rni',
used_as_input_variables = used_as_input_variables)
| agpl-3.0 |
ResByte/graph_slam | scripts/robot.py | 1 | 1487 | #!/usr/bin/env python
import roslib
import rospy
import sys
from geometry_msgs.msg import Twist
import numpy as np
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion
import matplotlib.pyplot as plt
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
import itertools
class Robot():
"""This is a generic robot class to implement various machine learning algorithms and """
def __init__(self):
self.pose = [] #check if this needs to be initialized
rospy.init_node('robot',anonymous = False)
def odomCb(self,msg):
#print msg.pose.pose
quaternion = (msg.pose.pose.orientation.x,msg.pose.pose.orientation.y,msg.pose.pose.orientation.z,msg.pose.pose.orientation.w)
euler = euler_from_quaternion(quaternion)
#print euler
self.pose = [msg.pose.pose.position.x,msg.pose.pose.position.y,euler[2]]
print self.pose[0],self.pose[1],self.pose[2]
def odomSub(self):
rospy.Subscriber('/odom',Odometry,self.odomCb)
def cloudCb(self,data):
# read the (x, y, z) fields of the PointCloud2 message; np.asarray on the raw
# message object does not extract any points
data_out = pc2.read_points(data, field_names=("x", "y", "z"), skip_nans=True)
cloud = np.asarray(list(itertools.islice(data_out, 0, 100)))
print cloud.shape
def cloudSub(self):
rospy.Subscriber('/camera/depth/points',PointCloud2,self.cloudCb)
if __name__ == '__main__':
print "init_node"
try:
robot = Robot()
#robot.odomSub()
# subscribe once and let rospy deliver callbacks instead of re-creating the
# subscriber on every pass of a busy loop
robot.cloudSub()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("node terminated.") | gpl-2.0 |
ywcui1990/nupic.research | projects/vehicle-control/agent/run_sm.py | 6 | 7819 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import operator
import time
import numpy
from unity_client.server import Server
from nupic.encoders.coordinate import CoordinateEncoder
from nupic.encoders.scalar import ScalarEncoder
from nupic.algorithms.monitor_mixin.trace import CountsTrace
from sensorimotor.extended_temporal_memory import ApicalTiebreakPairMemory
from htmresearch.support.apical_tm_pair_monitor_mixin import (
ApicalTMPairMonitorMixin)
class MonitoredApicalTiebreakPairMemory(
ApicalTMPairMonitorMixin, ApicalTiebreakPairMemory): pass
SCALE = 5
RADIUS = 10
class Agent(object):
def __init__(self):
self.encoder = CoordinateEncoder(n=1024,
w=21)
self.motorEncoder = ScalarEncoder(21, -1, 1,
n=1024)
self.tm = MonitoredApicalTiebreakPairMemory(
columnDimensions=[2048],
basalInputDimensions=(999999,),  # Dodge input checking.
cellsPerColumn=1,
initialPermanence=0.5,
connectedPermanence=0.6,
permanenceIncrement=0.1,
permanenceDecrement=0.02,
minThreshold=35,
activationThreshold=35,
maxNewSynapseCount=40)
self.plotter = Plotter(self.tm, showOverlaps=False, showOverlapsValues=False)
self.lastState = None
self.lastAction = None
self.prevMotorPattern = ()
def sync(self, outputData):
if not ("location" in outputData and
"steer" in outputData):
print "Warning: Missing data:", outputData
return
reset = outputData.get("reset") or False
if reset:
print "Reset."
self.tm.reset()
location = outputData["location"]
steer = outputData["steer"]
x = int(location["x"] * SCALE)
z = int(location["z"] * SCALE)
coordinate = numpy.array([x, z])
encoding = self.encoder.encode((coordinate, RADIUS))
motorEncoding = self.motorEncoder.encode(steer)
sensorPattern = set(encoding.nonzero()[0])
motorPattern = set(motorEncoding.nonzero()[0])
self.tm.compute(sensorPattern,
activeCellsExternalBasal=motorPattern,
reinforceCandidatesExternalBasal=self.prevMotorPattern,
growthCandidatesExternalBasal=self.prevMotorPattern)
print self.tm.mmPrettyPrintMetrics(self.tm.mmGetDefaultMetrics())
self.plotter.update(encoding, reset)
if reset:
self.plotter.render()
self.lastState = encoding
self.lastAction = steer
self.prevMotorPattern = motorPattern
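# Hedged sketch (not part of the original agent, illustration only): how one
# location/steer sample is turned into the sparse sensor and motor patterns that
# Agent.sync() feeds to the temporal memory. Encoder sizes mirror Agent.__init__;
# the location and steer values are made up.
def _encoding_example():
    encoder = CoordinateEncoder(n=1024, w=21)
    motorEncoder = ScalarEncoder(21, -1, 1, n=1024)
    location = {"x": 3.2, "z": -1.7}
    steer = 0.25
    coordinate = numpy.array([int(location["x"] * SCALE), int(location["z"] * SCALE)])
    sensorPattern = set(encoder.encode((coordinate, RADIUS)).nonzero()[0])
    motorPattern = set(motorEncoder.encode(steer).nonzero()[0])
    return sensorPattern, motorPattern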
class Plotter(object):
def __init__(self, tm, showOverlaps=False, showOverlapsValues=False):
self.tm = tm
self.showOverlaps = showOverlaps
self.showOverlapsValues = showOverlapsValues
self.encodings = []
self.resets = []
self.numSegmentsPerCell = []
self.numSynapsesPerSegment = []
import matplotlib.pyplot as plt
self.plt = plt
import matplotlib.cm as cm
self.cm = cm
from pylab import rcParams
if self.showOverlaps and self.showOverlapsValues:
rcParams.update({'figure.figsize': (20, 20)})
else:
rcParams.update({'figure.figsize': (6, 12)})
rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
rcParams.update({'ytick.labelsize': 8})
def update(self, encoding, reset):
self.encodings.append(encoding)
self.resets.append(reset)
# TODO: Deal with empty segments / unconnected synapses
numSegmentsPerCell = [len(segments) for segments in
self.tm.connections._segmentsForCell.values()]
self.numSegmentsPerCell.append(numpy.array(numSegmentsPerCell))
numSynapsesPerSegment = [len(synapses) for synapses in
self.tm.connections._synapsesForSegment.values()]
self.numSynapsesPerSegment.append(numpy.array(numSynapsesPerSegment))
def render(self):
timestamp = int(time.time())
self.plt.figure(1)
self.plt.clf()
self._renderMetrics(timestamp)
if self.showOverlaps:
self.plt.figure(2)
self.plt.clf()
self._renderOverlaps(timestamp)
def _renderMetrics(self, timestamp):
traces = self.tm.mmGetDefaultTraces()
traces = [trace for trace in traces if type(trace) is CountsTrace]
t = len(traces)
n = t + 2
for i in xrange(t):
trace = traces[i]
self.plt.subplot(n, 1, i+1)
self._plot(trace.data, trace.title)
self.plt.subplot(n, 1, t+1)
self._plotDistributions(self.numSegmentsPerCell, "# segments per cell")
self.plt.subplot(n, 1, t+2)
self._plotDistributions(self.numSynapsesPerSegment, "# synapses per segment")
self.plt.draw()
self.plt.savefig("sm-{0}_A.png".format(timestamp))
def _renderOverlaps(self, timestamp):
self.plt.subplot(1, 1, 1)
overlaps = self._computeOverlaps()
self._imshow(overlaps, "Overlaps", aspect=None)
for i in self._computeResetIndices():
self.plt.axvline(i, color='black', alpha=0.5)
self.plt.axhline(i, color='black', alpha=0.5)
if self.showOverlapsValues:
for i in range(len(overlaps)):
for j in range(len(overlaps[i])):
overlap = "%.1f" % overlaps[i][j]
self.plt.annotate(overlap, xy=(i, j), fontsize=6, color='red', verticalalignment='center', horizontalalignment='center')
self.plt.draw()
self.plt.savefig("sm-{0}_B.png".format(timestamp))
def _computeOverlaps(self):
overlaps = []
encodings = self.encodings
for i in range(len(encodings)):
row = []
for j in range(len(encodings)):
n = max(encodings[i].sum(), encodings[j].sum())
overlap = (encodings[i] & encodings[j]).sum() / float(n)
row.append(overlap)
overlaps.append(row)
return overlaps
def _computeResetIndices(self):
return numpy.array(self.resets).nonzero()[0]
def _plot(self, data, title):
self.plt.plot(range(len(data)), data)
self._finishPlot(data, title)
def _finishPlot(self, data, title):
self.plt.title(title)
self.plt.xlim(0, len(data))
for i in self._computeResetIndices():
self.plt.axvline(i, color='black', alpha=0.5)
def _imshow(self, data, title, aspect='auto'):
self.plt.title(title)
self.plt.imshow(data,
cmap=self.cm.Greys,
interpolation="nearest",
aspect=aspect,
vmin=0,
vmax=1)
def _plotDistributions(self, data, title):
means = [numpy.mean(x) if len(x) else 0 for x in data]
maxs = [numpy.max(x) if len(x) else 0 for x in data]
self.plt.plot(range(len(data)), means, label='mean')
self.plt.plot(range(len(data)), maxs, label='max')
self.plt.legend(loc='lower right')
self._finishPlot(data, title)
if __name__ == "__main__":
agent = Agent()
Server(agent)
| agpl-3.0 |
zmlabe/IceVarFigs | Scripts/SeaSurfaceTemperatures/plot_ersst5.py | 1 | 5197 | """
Plot selected years of monthly ERSSTv5 global data
Website : https://www1.ncdc.noaa.gov/pub/data/cmb/ersst/v5/netcdf/
Author : Zachary M. Labe
Date : 22 July 2017
"""
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import numpy as np
import datetime
import nclcmaps as ncm
### Read in data files from server
directoryfigure = './Figures/'
directorydata = './Data/'
### Define constants
now = datetime.datetime.now()
month = now.month
monthsq = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul',
r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan']
### Input selected years and months!
years = np.arange(1992,2016+1,1)
months = np.arange(1,12+1,1)
### Read in data
sst = np.empty((years.shape[0],months.shape[0],89,180))
for i in range(years.shape[0]):
for j in range(months.shape[0]):
filename = directorydata + 'ersst.v5.%s%02d.nc' % (years[i],
months[j])
data = Dataset(filename)
lats = data.variables['lat'][:]
lons = data.variables['lon'][:]
sst[i,j,:,:] = data.variables['sst'][0,0,:,:]
data.close()
print('Completed: Read year %s!' % years[i])
### Locate missing data
sst[np.where(sst == -999)] = np.nan
### Reshape data
sst = np.reshape(sst,(300,89,180))
### Create list of years for plotting
yearsqq = np.repeat(years,12)
###############################################################################
###############################################################################
###############################################################################
### Plot figure
### Define parameters (dark)
def setcolor(x, color):
for m in x:
for t in x[m][1]:
t.set_color(color)
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='k')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
### Select map type
style = 'global'
if style == 'ortho':
m = Basemap(projection='ortho',lon_0=-90,
lat_0=70,resolution='l',round=True)
elif style == 'polar':
m = Basemap(projection='npstere',boundinglat=67,lon_0=270,resolution='l',round =True)
elif style == 'global':
m = Basemap(projection='moll',lon_0=0,resolution='l',area_thresh=10000)
### Begin loop of years/months
for i in range(sst.shape[0]):
fig = plt.figure()
ax = plt.subplot(111)
for txt in fig.texts:
txt.set_visible(False)
var = sst[i,:,:]
m.drawmapboundary(fill_color='k')
m.drawcoastlines(color='k',linewidth=0.4)
### Colorbar limits
barlim = np.arange(0,31,5)
### Make the plot continuous
var, lons_cyclic = addcyclic(var, lons)
var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
cs = plt.contourf(x,y,var,np.arange(-1.8,31.1,1),
extend='max')
cmap = ncm.cmap('MPL_gnuplot')
cs.set_cmap(cmap)
t = plt.annotate(r'\textbf{%s}' % yearsqq[i],textcoords='axes fraction',
xy=(0,0), xytext=(0.34,1.03),
fontsize=50,color='w',alpha=0.6)
t1 = plt.annotate(r'\textbf{GRAPHIC}: Zachary Labe (@ZLabe)',
textcoords='axes fraction',
xy=(0,0), xytext=(0.02,-0.167),
fontsize=4.5,color='w',alpha=0.6)
t2 = plt.annotate(r'\textbf{SOURCE}: https://www1.ncdc.noaa.gov/',
textcoords='axes fraction',
xy=(0,0), xytext=(0.02,-0.197),
fontsize=4.5,color='w',alpha=0.6)
t3 = plt.annotate(r'\textbf{DATA}: NOAA ERSSTv5, Huang et al. (2017)',
textcoords='axes fraction',
xy=(0,0), xytext=(0.02,-0.227),
fontsize=4.5,color='w',alpha=0.6)
t4 = plt.annotate(r'\textbf{SEA SURFACE TEMPERATURES}',
textcoords='axes fraction',
xy=(0,0), xytext=(0.24,-0.036),fontsize=13,color='w',alpha=0.6)
m.fillcontinents(color='k')
cbar = plt.colorbar(cs,drawedges=False,orientation='horizontal',
pad = 0.04,fraction=0.035)
cbar.set_ticks(barlim)
cbar.set_ticklabels(list(map(str,barlim)))
cbar.set_label(r'\textbf{$\bf{^\circ}$\textbf{C}}',fontsize=13,
color='w')
cbar.ax.tick_params(axis='x', size=.001)
cbar.ax.tick_params(labelsize=6)
plt.subplots_adjust(bottom=0.2)
### Save figure to create animation using ImageMagick
if i < 10:
plt.savefig(directoryfigure + 'sstq_00%s.png' % (i),
dpi=200)
elif i < 100:
plt.savefig(directoryfigure + 'sstq_0%s.png' % (i),
dpi=200)
else:
plt.savefig(directoryfigure + 'sstq_%s.png' % (i),
dpi=200)
### Remove text for each figure
t.remove()
t1.remove()
t2.remove()
t3.remove()
t4.remove() | mit |
RachitKansal/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
tawsifkhan/scikit-learn | sklearn/linear_model/omp.py | 127 | 30417 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
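# Hedged usage sketch (not part of the upstream module): recovering a sparse
# coefficient vector from a synthetic dictionary with orthogonal_mp(). The data
# below is made up; columns are normalised because the solver assumes unit norms.
def _orthogonal_mp_example():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 20)
    X /= np.sqrt(np.sum(X ** 2, axis=0))
    true_coef = np.zeros(20)
    true_coef[[2, 7, 11]] = rng.randn(3)
    y = np.dot(X, true_coef)
    coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
    return np.flatnonzero(coef)  # expected to contain the indices 2, 7 and 11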
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
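# Hedged usage sketch (not part of the upstream module): the same synthetic problem
# solved from the precomputed Gram matrix X.T * X and product X.T * y, which is what
# orthogonal_mp_gram() expects when the design matrix itself is not kept around.
def _orthogonal_mp_gram_example():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 20)
    X /= np.sqrt(np.sum(X ** 2, axis=0))
    true_coef = np.zeros(20)
    true_coef[[2, 7, 11]] = rng.randn(3)
    y = np.dot(X, true_coef)
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    coef = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=3)
    return np.flatnonzero(coef)  # expected to match the result of _orthogonal_mp_example()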
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
Maximum number of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
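# Hedged usage sketch (not part of the upstream module): letting
# OrthogonalMatchingPursuitCV pick the number of non-zero coefficients by
# cross-validation on noisy synthetic data.
def _omp_cv_example():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 15)
    true_coef = np.zeros(15)
    true_coef[[1, 4, 9]] = rng.randn(3)
    y = np.dot(X, true_coef) + 0.01 * rng.randn(100)
    model = OrthogonalMatchingPursuitCV(cv=5).fit(X, y)
    return model.n_nonzero_coefs_  # number of atoms selected across the folds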
| bsd-3-clause |
rahiel/shellstats | shellstats.py | 1 | 3629 | # -*- coding: utf-8 -*-
from __future__ import division
from os import getenv
from os.path import isfile
from sys import exit
import click
@click.command()
@click.option("--n", default=10, help="How many commands to show.")
@click.option("--plot", is_flag=True, help="Plot command usage in pie chart.")
@click.option("--command", default=None,
help="Most frequent subcommands for command, e.g. sudo, git.")
@click.option("--history-file", type=click.Path(exists=True, readable=True),
default=None, help="Read shell history from history-file.")
@click.option("--shell", default=None,
help="Specify shell history format: bash, fish or zsh.")
def main(n, plot, command, history_file, shell):
"""Print the most frequently used shell commands."""
history = get_history(history_file, shell, command)
commands = {}
for line in history:
cmd = line.split()
if cmd[0] in commands:
commands[cmd[0]] += 1
else:
commands[cmd[0]] = 1
total = len(history)
# counts :: [(command, num_occurance)]
counts = sorted(commands.items(), key=lambda x: x[1], reverse=True)
print_top(n, counts, total)
if plot:
pie_top(n, counts, command)
return counts
def pie_top(n, counts, command):
"""Show a pie chart of n most used commands."""
try:
import matplotlib.pyplot as plt
except ImportError:
click.echo(click.style("Please install matplotlib for plotting.", fg="red"))
exit()
label, x = zip(*counts[:n])
fig = plt.figure()
fig.canvas.set_window_title("ShellStats")
plt.axes(aspect=1)
if command:
title = "Top {0} used {1} subcommands.".format(min(n, len(counts)), command)
else:
title = "Top {0} used shell commands.".format(min(n, len(counts)))
plt.title(title)
plt.pie(x, labels=label)
plt.show()
def print_top(n, counts, total):
"""Print the top n used commands."""
click.echo("{:>3} {:<20} {:<10} {:<3}"
.format('', "Command", "Count", "Percentage"))
# min for when history is too small
for i in min(range(n), range(len(counts)), key=len):
cmd, count = counts[i]
click.echo("{i:>3} {cmd:<20} {count:<10} {percent:<3.3}%"
.format(i=i+1, cmd=cmd, count=count,
percent=count / total * 100))
def get_history(history_file, shell, command):
"""Get usage history for the shell in use."""
shell = shell or getenv("SHELL").split('/')[-1]
if history_file is None:
home = getenv("HOME") + '/'
hist_files = {"bash": [".bash_history"],
"fish": [".config/fish/fish_history"],
"zsh": [".zhistory", ".zsh_history"]}
if shell in hist_files:
for hist_file in hist_files[shell]:
if isfile(home + hist_file):
history_file = home + hist_file
if not history_file:
click.echo(click.style("Shell history file not found.", fg="red"))
exit()
with open(history_file, 'r') as h:
history = [l.strip() for l in h.readlines() if l.strip()]
if shell == "fish":
history = [l[7:] for l in history if l.startswith("- cmd:")]
elif shell == "zsh":
hist = []
for l in history:
if l.startswith(": "):
hist.append(l.split(';', 1)[-1])
else:
hist.append(l)
history = hist
if command:
history = [l[len(command) + 1:] for l in history if l.startswith(str(command))]
return history
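# Hedged illustration (not part of the original module): the transformation applied
# by the zsh branch of get_history() above. With EXTENDED_HISTORY, zsh stores lines
# as ": <timestamp>:<duration>;<command>", and only the command part is kept.
def _zsh_line_example():
    line = ": 1458742937:0;git status --short"
    command = line.split(';', 1)[-1] if line.startswith(": ") else line
    assert command == "git status --short"
    return command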
| mit |
cbertinato/pandas | pandas/tests/frame/test_axis_select_reindex.py | 1 | 44030 | from datetime import datetime
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestDataFrameSelectReindex(TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
assert obj.index.name == 'first'
assert obj.columns.name == 'second'
assert list(df.columns) == ['d', 'e', 'f']
msg = r"\['g'\] not found in axis"
with pytest.raises(KeyError, match=msg):
df.drop(['g'])
with pytest.raises(KeyError, match=msg):
df.drop(['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
tm.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
tm.assert_index_equal(dropped.columns, expected)
# GH 16398
dropped = df.drop([], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.loc[[1, 2], :])
with pytest.raises(KeyError, match=r"\[5\] not found in axis"):
simple.drop(5)
with pytest.raises(KeyError, match=r"\['C'\] not found in axis"):
simple.drop('C', 1)
with pytest.raises(KeyError, match=r"\[5\] not found in axis"):
simple.drop([1, 5])
with pytest.raises(KeyError, match=r"\['C'\] not found in axis"):
simple.drop(['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.loc[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(list(zip(range(3), range(-3, 1), list('abc'))),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
assert_frame_equal(nu_df.drop([]), nu_df) # GH 16398
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.loc[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.loc[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's (GH12392)
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.drop('a')
res2 = df.drop(index='a')
tm.assert_frame_equal(res1, res2)
res1 = df.drop('d', 1)
res2 = df.drop(columns='d')
tm.assert_frame_equal(res1, res2)
res1 = df.drop(labels='e', axis=1)
res2 = df.drop(columns='e')
tm.assert_frame_equal(res1, res2)
res1 = df.drop(['a'], axis=0)
res2 = df.drop(index=['a'])
tm.assert_frame_equal(res1, res2)
res1 = df.drop(['a'], axis=0).drop(['d'], axis=1)
res2 = df.drop(index=['a'], columns=['d'])
tm.assert_frame_equal(res1, res2)
with pytest.raises(ValueError):
df.drop(labels='a', index='b')
with pytest.raises(ValueError):
df.drop(labels='a', columns='b')
with pytest.raises(ValueError):
df.drop(axis=1)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in newFrame[col].items():
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
else:
assert val == self.frame[col][idx]
else:
assert np.isnan(val)
for col, series in newFrame.items():
assert tm.equalContents(series.index, newFrame.index)
emptyFrame = self.frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in nonContigFrame[col].items():
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
else:
assert val == self.frame[col][idx]
else:
assert np.isnan(val)
for col, series in nonContigFrame.items():
assert tm.equalContents(series.index, nonContigFrame.index)
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
assert newFrame.index is self.frame.index
# length zero
newFrame = self.frame.reindex([])
assert newFrame.empty
assert len(newFrame.columns) == len(self.frame.columns)
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
assert len(newFrame.index) == len(self.frame.index)
assert len(newFrame.columns) == len(self.frame.columns)
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
tm.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
assert result is not self.frame
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(np.random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
assert df.index.name == 'iname'
df = df.reindex(Index(np.arange(10), name='tmpname'))
assert df.index.name == 'tmpname'
s = Series(np.random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
assert df.columns.name == 'iname'
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
assert smaller['A'].dtype == np.int64
bigger = smaller.reindex(self.intframe.index)
assert bigger['A'].dtype == np.float64
smaller = self.intframe.reindex(columns=['A', 'B'])
assert smaller['A'].dtype == np.int64
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
new_frame = self.frame.reindex(columns=['A', 'B', 'E'])
tm.assert_series_equal(new_frame['B'], self.frame['B'])
assert np.isnan(new_frame['E']).all()
assert 'C' not in new_frame
# Length zero
new_frame = self.frame.reindex(columns=[])
assert new_frame.empty
def test_reindex_columns_method(self):
# GH 14992, reindexing over columns ignored method
df = DataFrame(data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
index=[1, 2, 4],
columns=[1, 2, 4],
dtype=float)
# default method
result = df.reindex(columns=range(6))
expected = DataFrame(data=[[np.nan, 11, 12, np.nan, 13, np.nan],
[np.nan, 21, 22, np.nan, 23, np.nan],
[np.nan, 31, 32, np.nan, 33, np.nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
# method='ffill'
result = df.reindex(columns=range(6), method='ffill')
expected = DataFrame(data=[[np.nan, 11, 12, 12, 13, 13],
[np.nan, 21, 22, 22, 23, 23],
[np.nan, 31, 32, 32, 33, 33]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
# method='bfill'
result = df.reindex(columns=range(6), method='bfill')
expected = DataFrame(data=[[11, 11, 12, 13, 13, np.nan],
[21, 21, 22, 23, 23, np.nan],
[31, 31, 32, 33, 33, np.nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
assert index_freq == both_freq
assert index_freq == seq_freq
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(list(range(15)))
assert np.isnan(result.values[-5:]).all()
result = df.reindex(range(15), fill_value=0)
expected = df.reindex(range(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=range(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=range(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=range(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
with tm.assert_produces_warning(FutureWarning):
result = df.reindex_axis(range(15), fill_value=0., axis=0)
expected = df.reindex(range(15)).fillna(0)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.reindex_axis(range(5), fill_value=0., axis=1)
expected = df.reindex(columns=range(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(range(15), fill_value=0)
expected = df.reindex(range(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df.reindex(index=list(range(len(df))))
def test_reindex_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = pd.DataFrame({"A": [1, 2, np.nan], "B": [4, 5, np.nan]},
index=[0, 1, 3])
result = df.reindex([0, 1, 3])
assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis=0)
assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis='index')
assert_frame_equal(result, expected)
def test_reindex_positional_warns(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = pd.DataFrame({"A": [1., 2], 'B': [4., 5],
"C": [np.nan, np.nan]})
with tm.assert_produces_warning(FutureWarning):
result = df.reindex([0, 1], ['A', 'B', 'C'])
assert_frame_equal(result, expected)
def test_reindex_axis_style_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], 'B': [4, 5, 6]})
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex([0, 1], ['A'], axis=1)
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex([0, 1], ['A'], axis='index')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='index')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='columns')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(columns=[0, 1], axis='columns')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], columns=[0, 1], axis='columns')
with pytest.raises(TypeError, match='Cannot specify all'):
df.reindex([0, 1], [0], ['A'])
# Mixing styles
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='index')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='columns')
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.reindex([0, 1], labels=[0, 1])
def test_reindex_single_named_indexer(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]})
result = df.reindex([0, 1], columns=['A'])
expected = pd.DataFrame({"A": [1, 2]})
assert_frame_equal(result, expected)
def test_reindex_api_equivalence(self):
# https://github.com/pandas-dev/pandas/issues/12392
# equivalence of the labels/axis and index/columns API's
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.reindex(['b', 'a'])
res2 = df.reindex(index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'])
res4 = df.reindex(labels=['b', 'a'], axis=0)
res5 = df.reindex(['b', 'a'], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=['e', 'd'])
res2 = df.reindex(['e', 'd'], axis=1)
res3 = df.reindex(labels=['e', 'd'], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
with tm.assert_produces_warning(FutureWarning) as m:
res1 = df.reindex(['b', 'a'], ['e', 'd'])
assert 'reindex' in str(m[0].message)
res2 = df.reindex(columns=['e', 'd'], index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'],
axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_align(self):
af, bf = self.frame.align(self.frame)
assert af._data is not self.frame._data
af, bf = self.frame.align(self.frame, copy=False)
assert af._data is self.frame._data
# axis = 0
other = self.frame.iloc[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
af, bf = self.frame.align(other, join='right', axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = self.frame.iloc[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, self.frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
assert (diff_a_vals == -1).all()
af, bf = self.frame.align(other, join='inner', axis=1)
tm.assert_index_equal(bf.columns, other.columns)
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, self.mixed_frame.columns)
af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=None)
tm.assert_index_equal(bf.index, Index([]))
af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
af, bf = self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
with pytest.raises(ValueError):
self.frame.align(af.iloc[0, :3], join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
assert isinstance(right, Series)
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {c: s for c in self.frame.columns}
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(right, expected)
# see gh-9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
@pytest.mark.parametrize('meth', ['pad', 'bfill'])
@pytest.mark.parametrize('ax', [0, 1, None])
@pytest.mark.parametrize('fax', [0, 1])
@pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right'])
def test_align_fill_method(self, how, meth, ax, fax):
self._check_align_fill(how, meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.iloc[0:4, :10]
right = self.frame.iloc[2:, 6:]
empty = self.frame.iloc[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join='left')
res2l, res2r = df2.align(df1, join='right')
expl = df1
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join='right')
res2l, res2r = df2.align(df1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = pd.Series([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_filter(self):
# Items
filtered = self.frame.filter(['A', 'B', 'E'])
assert len(filtered.columns) == 2
assert 'E' not in filtered
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
assert len(filtered.columns) == 2
assert 'E' not in filtered
# Other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
tm.assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
assert len(filtered.columns) == 2
assert 'AA' in filtered
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
assert len(filtered.columns) == 2
# regex with ints in column names
# from PR #10384
df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
expected = DataFrame(
0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
filtered = df.filter(regex='^[0-9]+$')
tm.assert_frame_equal(filtered, expected)
expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
# shouldn't remove anything
filtered = expected.filter(regex='^[0-9]+$')
tm.assert_frame_equal(filtered, expected)
# pass in None
with pytest.raises(TypeError, match='Must pass'):
self.frame.filter()
with pytest.raises(TypeError, match='Must pass'):
self.frame.filter(items=None)
with pytest.raises(TypeError, match='Must pass'):
self.frame.filter(axis=1)
# test mutually exclusive arguments
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$', like='bbi')
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$', axis=1)
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$')
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], like='bbi', axis=0)
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], like='bbi')
# objects
filtered = self.mixed_frame.filter(like='foo')
assert 'foo' in filtered
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': '\u2202'})
filtered = df.filter(like='C')
assert 'C' in filtered
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
assert len(filtered.columns) == 2
assert 'AA' in filtered
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
    @pytest.mark.parametrize('name,expected', [
        ('a', DataFrame({'a': [1, 2]})),
        (u'a', DataFrame({'a': [1, 2]})),
        (u'あ', DataFrame({u'あ': [3, 4]}))
    ])
def test_filter_unicode(self, name, expected):
# GH13101
df = DataFrame({'a': [1, 2], 'あ': [3, 4]})
assert_frame_equal(df.filter(like=name), expected)
assert_frame_equal(df.filter(regex=name), expected)
    @pytest.mark.parametrize('name', [b'a', u'a'])
def test_filter_bytestring(self, name):
# GH13101
df = DataFrame({b'a': [1, 2], b'b': [3, 4]})
expected = DataFrame({b'a': [1, 2]})
assert_frame_equal(df.filter(like=name), expected)
assert_frame_equal(df.filter(regex=name), expected)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
# negative indices
order = [2, 1, -1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.take(order, convert=True, axis=0)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.take(order, convert=False, axis=0)
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
msg = "indices are out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, 30], axis=0)
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, -31], axis=0)
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, 5], axis=1)
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, -5], axis=1)
# mixed-dtype
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
# negative indices
order = [4, 1, -2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float, self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[0][1])
reindexed = frame.reindex(columns=range(3))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[1]).all()
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
assert 'foo' in reindexed
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
assert 'foo' not in reindexed
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
tm.assert_index_equal(reindexed.columns, index)
# ints are weird
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
assert smaller['E'].dtype == np.float64
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
with tm.assert_produces_warning(FutureWarning) as m:
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
with tm.assert_produces_warning(FutureWarning) as m:
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
assert 'reindex' in str(m[0].message)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
self.intframe.reindex_axis(rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
with tm.assert_produces_warning(FutureWarning) as m:
newFrame = self.frame.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
assert_frame_equal(newFrame, self.frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=['a', 'b'],
index=[100.0, 101.0, np.nan, 102.0, 103.0])
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
assert_frame_equal(result, expected)
def test_reindex_multi(self):
df = DataFrame(np.random.randn(3, 3))
result = df.reindex(index=range(4), columns=range(4))
expected = df.reindex(list(range(4))).reindex(columns=range(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=range(4), columns=range(4))
expected = df.reindex(list(range(4))).reindex(columns=range(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=range(2), columns=range(2))
expected = df.reindex(range(2)).reindex(columns=range(2))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
result = df.reindex(index=[0, 1], columns=['a', 'b'])
expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_reindex_multi_categorical_time(self):
# https://github.com/pandas-dev/pandas/issues/21390
midx = pd.MultiIndex.from_product(
[Categorical(['a', 'b', 'c']),
Categorical(date_range("2012-01-01", periods=3, freq='H'))])
df = pd.DataFrame({'a': range(len(midx))}, index=midx)
df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]]
result = df2.reindex(midx)
expected = pd.DataFrame(
{'a': [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx)
assert_frame_equal(result, expected)
data = [[1, 2, 3], [1, 2, 3]]
@pytest.mark.parametrize('actual', [
DataFrame(data=data, index=['a', 'a']),
DataFrame(data=data, index=['a', 'b']),
DataFrame(data=data, index=['a', 'b']).set_index([0, 1]),
DataFrame(data=data, index=['a', 'a']).set_index([0, 1])
])
def test_raise_on_drop_duplicate_index(self, actual):
# issue 19186
level = 0 if isinstance(actual.index, MultiIndex) else None
with pytest.raises(KeyError):
actual.drop('c', level=level, axis=0)
with pytest.raises(KeyError):
actual.T.drop('c', level=level, axis=1)
expected_no_err = actual.drop('c', axis=0, level=level,
errors='ignore')
assert_frame_equal(expected_no_err, actual)
expected_no_err = actual.T.drop('c', axis=1, level=level,
errors='ignore')
assert_frame_equal(expected_no_err.T, actual)
@pytest.mark.parametrize('index', [[1, 2, 3], [1, 1, 2]])
@pytest.mark.parametrize('drop_labels', [[], [1], [2]])
def test_drop_empty_list(self, index, drop_labels):
# GH 21494
expected_index = [i for i in index if i not in drop_labels]
frame = pd.DataFrame(index=index).drop(drop_labels)
tm.assert_frame_equal(frame, pd.DataFrame(index=expected_index))
@pytest.mark.parametrize('index', [[1, 2, 3], [1, 2, 2]])
@pytest.mark.parametrize('drop_labels', [[1, 4], [4, 5]])
def test_drop_non_empty_list(self, index, drop_labels):
# GH 21494
with pytest.raises(KeyError, match='not found in axis'):
pd.DataFrame(index=index).drop(drop_labels)
| bsd-3-clause |
lpeska/BRDTI | netlaprls.py | 1 | 2811 | '''
We base the NetLapRLS implementation on the one from the PyDTI project (https://github.com/stephenliu0423/PyDTI); changes were made to the evaluation procedure.
[1] Xia, Zheng, et al. "Semi-supervised drug-protein interaction prediction from heterogeneous biological spaces." BMC systems biology 4.Suppl 2 (2010): S6.
Default parameters:
gamma_d = 0.01, gamma_d=gamma_d2/gamma_d1
gamma_t = 0.01, gamma_t=gamma_p2/gamma_p1
beta_d = 0.3
beta_t = 0.3
'''
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from functions import normalized_discounted_cummulative_gain
class NetLapRLS:
def __init__(self, gamma_d=0.01, gamma_t=0.01, beta_d=0.3, beta_t=0.3):
self.gamma_d = float(gamma_d)
self.gamma_t = float(gamma_t)
self.beta_d = float(beta_d)
self.beta_t = float(beta_t)
def fix_model(self, W, intMat, drugMat, targetMat, seed=None):
R = W*intMat
m, n = R.shape
drugMat = (drugMat+drugMat.T)/2
targetMat = (targetMat+targetMat.T)/2
Wd = (drugMat+self.gamma_d*np.dot(R, R.T))/(1.0+self.gamma_d)
Wt = (targetMat+self.gamma_t*np.dot(R.T, R))/(1.0+self.gamma_t)
Wd = Wd-np.diag(np.diag(Wd))
Wt = Wt-np.diag(np.diag(Wt))
D = np.diag(np.sqrt(1.0/np.sum(Wd, axis=1)))
Ld = np.eye(m) - np.dot(np.dot(D, Wd), D)
D = np.diag(np.sqrt(1.0/np.sum(Wt, axis=1)))
Lt = np.eye(n) - np.dot(np.dot(D, Wt), D)
X = np.linalg.inv(Wd+self.beta_d*np.dot(Ld, Wd))
Fd = np.dot(np.dot(Wd, X), R)
X = np.linalg.inv(Wt+self.beta_t*np.dot(Lt, Wt))
Ft = np.dot(np.dot(Wt, X), R.T)
self.predictR = 0.5*(Fd+Ft.T)
def predict_scores(self, test_data, N):
inx = np.array(test_data)
return self.predictR[inx[:, 0], inx[:, 1]]
def evaluation(self, test_data, test_label):
scores = self.predictR[test_data[:, 0], test_data[:, 1]]
self.scores = scores
x, y = test_data[:, 0], test_data[:, 1]
test_data_T = np.column_stack((y,x))
ndcg = normalized_discounted_cummulative_gain(test_data, test_label, np.array(scores))
ndcg_inv = normalized_discounted_cummulative_gain(test_data_T, test_label, np.array(scores))
prec, rec, thr = precision_recall_curve(test_label, scores)
aupr_val = auc(rec, prec)
fpr, tpr, thr = roc_curve(test_label, scores)
auc_val = auc(fpr, tpr)
#!!!!we should distinguish here between inverted and not inverted methods nDCGs!!!!
return aupr_val, auc_val, ndcg, ndcg_inv
def __str__(self):
return "Model: NetLapRLS, gamma_d:%s, gamma_t:%s, beta_d:%s, beta_t:%s" % (self.gamma_d, self.gamma_t, self.beta_d, self.beta_t)
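# A minimal usage sketch (not part of the original module). The inputs below are
# placeholders/assumptions: W is a binary mask over known interactions, intMat the
# drug-target interaction matrix, drugMat/targetMat the drug and target similarity
# matrices, and test_data an array of (drug, target) index pairs with labels in
# test_label.
#
# model = NetLapRLS(gamma_d=0.01, gamma_t=0.01, beta_d=0.3, beta_t=0.3)
# model.fix_model(W, intMat, drugMat, targetMat)
# aupr_val, auc_val, ndcg, ndcg_inv = model.evaluation(test_data, test_label)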
| gpl-2.0 |
mick-d/nipype | tools/make_examples.py | 10 | 3014 | #!/usr/bin/env python
"""Run the py->rst conversion and run all examples.
This also creates the index.rst file appropriately, makes figures, etc.
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import open
from past.builtins import execfile
# -----------------------------------------------------------------------------
# Library imports
# -----------------------------------------------------------------------------
# Stdlib imports
import os
import sys
from glob import glob
# Third-party imports
# We must configure the mpl backend before making any further mpl imports
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib._pylab_helpers import Gcf
# Local tools
from toollib import *
# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------
examples_header = """
.. _examples:
Examples
========
.. note_about_examples
"""
# -----------------------------------------------------------------------------
# Function definitions
# -----------------------------------------------------------------------------
# These global variables let show() be called by the scripts in the usual
# manner, but when generating examples, we override it to write the figures to
# files with a known name (derived from the script name) plus a counter
figure_basename = None
# We must change the show command to save instead
def show():
allfm = Gcf.get_all_fig_managers()
for fcount, fm in enumerate(allfm):
fm.canvas.figure.savefig('%s_%02i.png' %
(figure_basename, fcount + 1))
_mpl_show = plt.show
plt.show = show
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Work in examples directory
cd('users/examples')
if not os.getcwd().endswith('users/examples'):
raise OSError('This must be run from doc/examples directory')
# Run the conversion from .py to rst file
sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples')
sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples/frontiers_paper')
# Make the index.rst file
"""
index = open('index.rst', 'w')
index.write(examples_header)
for name in [os.path.splitext(f)[0] for f in glob('*.rst')]:
#Don't add the index in there to avoid sphinx errors and don't add the
#note_about examples again (because it was added at the top):
if name not in(['index','note_about_examples']):
index.write(' %s\n' % name)
index.close()
"""
# Execute each python script in the directory.
if '--no-exec' in sys.argv:
pass
else:
if not os.path.isdir('fig'):
os.mkdir('fig')
for script in glob('*.py'):
figure_basename = pjoin('fig', os.path.splitext(script)[0])
execfile(script)
plt.close('all')
| bsd-3-clause |
poryfly/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
    def predict(self, X):
        """Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
angelmtenor/IDSFC | L1_intro/H_olympics_medal_points.py | 1 | 1606 | import numpy as np
from pandas import DataFrame
def numpy_dot():
"""
Imagine a point system in which each country is awarded 4 points for each
gold medal, 2 points for each silver medal, and one point for each
bronze medal.
Using the numpy.dot function, create a new dataframe called
'olympic_points_df' that includes:
a) a column called 'country_name' with the country name
b) a column called 'points' with the total number of points the country
earned at the Sochi olympics.
You do not need to call the function in your code when running it in the
browser - the grader will do that automatically when you submit or test it.
"""
countries = ['Russian Fed.', 'Norway', 'Canada', 'United States',
'Netherlands', 'Germany', 'Switzerland', 'Belarus',
'Austria', 'France', 'Poland', 'China', 'Korea',
'Sweden', 'Czech Republic', 'Slovenia', 'Japan',
'Finland', 'Great Britain', 'Ukraine', 'Slovakia',
'Italy', 'Latvia', 'Australia', 'Croatia', 'Kazakhstan']
gold = [13, 11, 10, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
silver = [11, 5, 10, 7, 7, 6, 3, 0, 8, 4, 1, 4, 3, 7, 4, 2, 4, 3, 1, 0, 0, 2, 2, 2, 1, 0]
bronze = [9, 10, 5, 12, 9, 5, 2, 1, 5, 7, 1, 2, 2, 6, 2, 4, 3, 1, 2, 1, 0, 6, 2, 1, 0, 1]
# YOUR CODE HERE
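    # For example, a country with 2 gold, 1 silver and 3 bronze medals would earn
    # np.dot([4, 2, 1], [2, 1, 3]) = 4*2 + 2*1 + 1*3 = 13 points.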
points = np.dot([4, 2, 1], [gold, silver, bronze])
olympic_points_df = DataFrame({'country_name': countries, 'points': points})
return olympic_points_df
print(numpy_dot())
| mit |
oiertwo/vampyr | pdftoexcel.py | 1 | 8194 | __author__ = 'oier'
import os
import numpy as np
from data.parameters import true_params
from data.parameters import false_params
import distance as dist
import numpy as np
def pdftotext(path):
os.system("pdftotext {data}".format(data=path))
return(path.replace(".pdf",".txt"))
import pandas as pd
def parse(path):
txt = pd.read_table(path, sep='\n', na_values=False, header=None)
for i in txt.index:
try :
if pd.isnull(float(txt.ix[i])) == False:
name = getname(i,txt)
print(name)
print(float(txt.ix[i]))
except :
pass
def getname(index, df):
name = ""
for i in range(0,index):
size = len(df.ix[i].to_string().split())
idxname = " ".join(df.ix[i].to_string().split()[1:size])
if (len( idxname )> 5) and idxname != None and idxname != "NaN":
name = idxname
#print(name)
return (name)
from collections import deque
def getnamedict(path):
dict = {}
numdict = {}
names = deque()
txt = pd.read_table(path, sep='\n', na_values=False, header=None)
name = ""
for i in txt.index:
try :
size = len(txt.ix[i].to_string().split())
nextname = " ".join(txt.ix[i].to_string().split()[1:size])
if (len( nextname )> 5) and \
nextname != None and \
nextname != "NaN" and \
isclean(nextname) and \
validateparam(nextname):
names.append(nextname)
dict[i] = nextname
#print(name)
#print(nextname)
if pd.isnull(float(txt.ix[i])) == False:
number = float(txt.ix[i])
numdict[names.pop()] = number
#print(number)
#print(i)
except :
pass
print(dict.keys())
print(dict.values())
print(numdict.keys())
print(numdict.values())
#organize(dict,numdict)
# print(dict[i])
def organize(names, numbers):
'''
:param names: must be dictionary
:param numbers: must be dictionary
:return: dictionary, dict[name] = number
'''
numbs = dict(numbers)
nams = dict(names)
conn1 = {}
conn2 = {}
array1 = np.array(nams.keys())
for i in numbs.keys():
actual = 100.0
inconn2 = False
key = min(nams.keys(), key=lambda k: abs(k - i))
print(" {} - {} ".format(key,i))
print(" {} - {} ".format(nams[key],numbs[i]))
'''
for j in numbs.keys():
actual = i - j
if ( actual > conn1[i] or conn1[i] == None):
if( conn2[j] == None):
conn1[i] = j
conn2[j] = actual
else:
best = j
inconn2 = True
else if (conn2[j] != None ):
'''
return()
def isclean(word):
    # True only if the word contains none of the characters in "_[]*"
    w = str(word)
    test = True
    strg = "_[]*"
    for i in range(len(strg)):
        c = strg[i]
        test = test and (w.find(c) == -1)
    return(test)
def validateparam(word):
t_dist = []
f_dist = []
for i in true_params:
t_dist.append(dist.levenshtein(word,i))
    for i in false_params:
        f_dist.append(dist.levenshtein(word, i))
    print("Word: {}, T: {} , F: {}".format(word, np.min(t_dist), np.min(f_dist)))
if( min(t_dist) == 0):
print("TRUE")
return (True)
if (min(f_dist) == 0):
print("FALSE")
        return(False)
if ( np.mean(t_dist )< np.mean(f_dist) ):
print("TRUE")
return(True)
print("FALSE")
return(False)
def getmyarray(path, apath):
dict = {}
appearances = {}
names = deque()
with open(path) as f:
txt = f.readlines()
#txt = pd.read_table(path, sep='\n', na_values=False, header=None)
array_txt = pd.read_table(apath, sep='\n', header=None)
name = ""
for i in txt:
actual = i.replace("\n", '')
if(len(actual.strip()) == 0):
continue
try :
number = float(actual)
if (number > 10000000):
continue
try:
appearances[actual] += 1
except:
appearances[actual] = 1
            name = localgetmyarray(path, apath, actual, appearances[actual])
dict[name] = i
print("name: {} numb: {}".format(name, i))
except :
pass
print(dict.keys())
print(dict.values())
def localgetmyarray(path, apath, word, count):
with open(path) as f:
txt = f.readlines()
#txt = pd.read_table(path, sep='\n', na_values=False, header=None)
f = open(apath)
array_txt_str = f.read()
name = ""
idx = [k.start() for k in re.finditer(word, array_txt_str)][count -1]
opt = len(array_txt_str)
apps ={}
for i in txt:
try :
nextname = i.replace("\n", '')
try :
float(nextname)
except :
if (len( nextname )> 5) and nextname != None and \
nextname != "NaN" and isclean(nextname):
try:
apps[nextname ] += 1
except:
apps[nextname] = 1
id = [k for k in re.finditer(nextname, array_txt_str)][apps[nextname]-1].start()
myopt = idx - id
if (myopt > 0) and (myopt < opt):
opt = myopt
name = nextname
except :
pass
print("optimum: {} number: {} found: {}".format(opt, word, name))
f.close()
return name
#DOWN FROM HERE JAVA+PYTHON PDF TO TEXT:
import re
import extractText as txt
def remove_unwanted(str):
s = re.sub(r'\[.*?\]', '',str)
s = s.replace("\*", "")
s = s.replace("\n", "")
return (s)
def line_control(str):
#may return True if str is not valid
#returns false if str is valid
if(len(str) < 15):
return True
if(len(str) == 1):
return True
if(len(str.split(" ")) > 10):
return True
return False
def line_parser(str):
    # Splits a text line into positional groups of the form
    # {"item": <label text>, "num": <numeric value>, "val": <percentage or '/'-separated value>}
    # and returns them as a dict keyed by group position.
    item = ''
valor = ''
dict = {}
sline = str.split(" ")
helper = {}
pos = 0
for schar in sline:
try:
#dict["val"]
if(len(dict.keys()) == 3 and len(sline) > 6):
helper[pos] = dict
dict = {}
pos += 1
            dict["val"]  # access to force a KeyError if "val" is not set yet
except:
try:
valor = ''
table = [char for char in schar if '/' in char]
if schar.find('%') != -1:
valor = schar
if len(table) > 0:
valor = schar
if(valor != ''):
dict["val"] = valor
continue
except:
pass
try:
#dict["num"]
if(len(dict.keys()) == 3 and len(sline) > 6):
helper[pos] = dict
dict = {}
pos += 1
dict["num"]
except:
try:
num = float(schar)
if(num > 10000):
return({})
dict["num"] = num
continue
except:
pass
try:
dict["item"] += " " + schar
except:
dict["item"] = schar
helper[pos] = dict
return(helper)
def getfromjava(path, dest=''):
    if (dest == ''):
        d = path.replace(".pdf", ".txt")
    else:
        d = dest
    txt.extractText(path, d, '')
with open(d) as f:
text = f.readlines()
for line in text:
sline = remove_unwanted(line)
if(line_control(sline) == True):
continue
dict = line_parser(sline)
for i in dict.keys():
if(len(dict[i].keys()) == 3):
print("ITEM: {} NUM: {} VAL: {}".format(dict[i]["item"], dict[i]["num"], dict[i]["val"]))
| mit |
wrightni/OSSP | segment.py | 1 | 6298 | # title: Watershed Transform
# author: Nick Wright
# adapted from: Justin Chen, Arnold Song
import numpy as np
import gc
import warnings
from skimage import filters, morphology, feature, img_as_ubyte
from scipy import ndimage
from ctypes import *
from lib import utils
# For Testing:
from skimage import segmentation
import matplotlib.image as mimg
def segment_image(input_data, image_type=False):
'''
Wrapper function that handles all of the processing to create watersheds
'''
#### Define segmentation parameters
    # High_threshold: Upper threshold for canny edge detection. Gradient values above this are marked as strong edges.
# Low_threshold: Lower threshold for canny edge detection. Determines which "weak" edges to keep.
# Values above this amount that are connected to a strong edge will be marked as an edge.
# Gauss_sigma: sigma value to use in the gaussian blur applied to the image prior to segmentation.
# Value chosen here should be based on the quality and resolution of the image
# Feature_separation: minimum distance, in pixels, between the center point of multiple features. Use a lower value
# for lower resolution (.5m) images, and higher resolution for aerial images (~.1m).
# These values are dependent on the type of imagery being processed, and are
# mostly empirically derived.
# band_list contains the three bands to be used for segmentation
if image_type == 'pan':
high_threshold = 0.15 * 255 ## Needs to be checked
low_threshold = 0.05 * 255 ## Needs to be checked
gauss_sigma = 1
feature_separation = 1
band_list = [0, 0, 0]
elif image_type == 'wv02_ms':
high_threshold = 0.20 * 255 ## Needs to be checked
low_threshold = 0.05 * 255 ## Needs to be checked
gauss_sigma = 1.5
feature_separation = 3
band_list = [4, 2, 1]
else: #image_type == 'srgb'
high_threshold = 0.15 * 255
low_threshold = 0.05 * 255
gauss_sigma = 2
feature_separation = 5
band_list = [0, 1, 2]
segmented_data = watershed_transformation(input_data, band_list, low_threshold, high_threshold,
gauss_sigma,feature_separation)
# Method that provides the user an option to view the original image
# side by side with the segmented image.
# print(np.amax(segmented_data))
# image_data = np.array([input_data[band_list[0]],
# input_data[band_list[1]],
# input_data[band_list[2]]],
# dtype=np.uint8)
# ws_bound = segmentation.find_boundaries(segmented_data)
# ws_display = utils.create_composite(image_data)
#
# # save_name = '/Users/nicholas/Desktop/original_{}.png'
# # mimg.imsave(save_name.format(np.random.randint(0,100)), ws_display, format='png')
#
# ws_display[:, :, 0][ws_bound] = 240
# ws_display[:, :, 1][ws_bound] = 80
# ws_display[:, :, 2][ws_bound] = 80
#
# save_name = '/Users/nicholas/Desktop/seg_{}.png'
# mimg.imsave(save_name.format(np.random.randint(0, 100)), ws_display, format='png')
return segmented_data
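# A minimal usage sketch (assumption: `block` is a band-indexable uint8 image array
# loaded elsewhere, e.g. three NxM bands for 'srgb' imagery):
#   watersheds = segment_image(block, image_type='srgb')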
def watershed_transformation(image_data, band_list, low_threshold, high_threshold, gauss_sigma, feature_separation):
'''
Runs a watershed transform on the main dataset
1. Create a gradient image using the sobel algorithm
2. Adjust the gradient image based on given threshold and amplification.
3. Find the local minimum gradient values and place a marker
4. Construct watersheds on top of the gradient image starting at the
markers.
'''
# If this block has no data, return a placeholder watershed.
if np.amax(image_data[0]) <= 1:
# We just need the dimensions from one band
return np.zeros(np.shape(image_data[0]))
# Build a raster of detected edges to inform the creation of watershed seed points
edge_image = edge_detect(image_data, band_list, gauss_sigma, low_threshold, high_threshold)
# Build a raster of image gradient that will be the base for watershed expansion.
grad_image = build_gradient(image_data, band_list, gauss_sigma)
image_data = None
# Find local minimum values in the edge image by inverting
# edge_image and finding the local maximum values
inv_edge = np.empty_like(edge_image, dtype=np.uint8)
np.subtract(255, edge_image, out=inv_edge)
edge_image = None
# Distance to the nearest detected edge
distance_image = ndimage.distance_transform_edt(inv_edge)
inv_edge = None
# Local maximum distance
local_min = feature.peak_local_max(distance_image, min_distance=feature_separation,
exclude_border=False, indices=False, num_peaks_per_label=1)
distance_image = None
markers = ndimage.label(local_min)[0]
local_min = None
# Build a watershed from the markers on top of the edge image
im_watersheds = morphology.watershed(grad_image, markers)
grad_image = None
# Set all values outside of the image area (empty pixels, usually caused by
# orthorectification) to one value, at the end of the watershed list.
# im_watersheds[empty_pixels] = np.amax(im_watersheds)+1
gc.collect()
return im_watersheds
def edge_detect(image_data, band_list, gauss_sigma, low_threshold, high_threshold):
# Detect edges in the image with a canny edge detector
with warnings.catch_warnings():
warnings.simplefilter("ignore")
edge_image = img_as_ubyte(feature.canny(image_data[band_list[1]], sigma=gauss_sigma,
low_threshold=low_threshold, high_threshold=high_threshold))
return edge_image
def build_gradient(image_data, band_list, gauss_sigma):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
smooth_im_blue = ndimage.filters.gaussian_filter(image_data[band_list[2]], sigma=gauss_sigma)
grad_image = img_as_ubyte(filters.scharr(smooth_im_blue))
# Prevent the watersheds from 'leaking' along the sides of the image
grad_image[:, 0] = grad_image[:, 1]
grad_image[:, -1] = grad_image[:, -2]
grad_image[0, :] = grad_image[1, :]
grad_image[-1, :] = grad_image[-2, :]
return grad_image | mit |
abhishekgahlot/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware than the time complexity
is polynomial.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
| bsd-3-clause |
saketkc/statsmodels | tools/backport_pr.py | 30 | 5263 | #!/usr/bin/env python
"""
Backport pull requests to a particular branch.
Usage: backport_pr.py branch [PR]
e.g.:
python tools/backport_pr.py 0.13.1 123
to backport PR #123 onto branch 0.13.1
or
python tools/backport_pr.py 1.x
to see what PRs are marked for backport that have yet to be applied.
Copied from IPython 9e82bc5
https://github.com/ipython/ipython/blob/master/tools/backport_pr.py
"""
from __future__ import print_function
import os
import re
import sys
from subprocess import Popen, PIPE, check_call, check_output
from urllib import urlopen
from gh_api import (
get_issues_list,
get_pull_request,
get_pull_request_files,
is_pull_request,
get_milestone_id,
)
from pandas import Series
def find_rejects(root='.'):
for dirname, dirs, files in os.walk(root):
for fname in files:
if fname.endswith('.rej'):
yield os.path.join(dirname, fname)
def get_current_branch():
branches = check_output(['git', 'branch'])
for branch in branches.splitlines():
if branch.startswith('*'):
return branch[1:].strip()
def backport_pr(branch, num, project='statsmodels/statsmodels'):
current_branch = get_current_branch()
if branch != current_branch:
check_call(['git', 'checkout', branch])
check_call(['git', 'pull'])
pr = get_pull_request(project, num, auth=True)
files = get_pull_request_files(project, num, auth=True)
patch_url = pr['patch_url']
title = pr['title']
description = pr['body']
fname = "PR%i.patch" % num
if os.path.exists(fname):
print("using patch from {fname}".format(**locals()))
with open(fname) as f:
patch = f.read()
else:
req = urlopen(patch_url)
patch = req.read()
msg = "Backport PR #%i: %s" % (num, title) + '\n\n' + description
check = Popen(['git', 'apply', '--check', '--verbose'], stdin=PIPE)
a,b = check.communicate(patch)
if check.returncode:
print("patch did not apply, saving to {fname}".format(**locals()))
print("edit {fname} until `cat {fname} | git apply --check` succeeds".format(**locals()))
print("then run tools/backport_pr.py {num} again".format(**locals()))
if not os.path.exists(fname):
with open(fname, 'wb') as f:
f.write(patch)
return 1
p = Popen(['git', 'apply'], stdin=PIPE)
a,b = p.communicate(patch)
filenames = [ f['filename'] for f in files ]
check_call(['git', 'add'] + filenames)
check_call(['git', 'commit', '-m', msg])
print("PR #%i applied, with msg:" % num)
print()
print(msg)
print()
if branch != current_branch:
check_call(['git', 'checkout', current_branch])
return 0
backport_re = re.compile(r"[Bb]ackport.*?(\d+)")
def already_backported(branch, since_tag=None):
"""return set of PRs that have been backported already"""
if since_tag is None:
since_tag = check_output(['git','describe', branch, '--abbrev=0']).decode('utf8').strip()
cmd = ['git', 'log', '%s..%s' % (since_tag, branch), '--oneline']
lines = check_output(cmd).decode('utf8')
return set(int(num) for num in backport_re.findall(lines))
def should_backport(labels=None, milestone=None):
"""return set of PRs marked for backport"""
if labels is None and milestone is None:
raise ValueError("Specify one of labels or milestone.")
elif labels is not None and milestone is not None:
raise ValueError("Specify only one of labels or milestone.")
if labels is not None:
issues = get_issues_list("statsmodels/statsmodels",
labels=labels,
state='closed',
auth=True,
)
else:
milestone_id = get_milestone_id("statsmodels/statsmodels", milestone,
auth=True)
issues = get_issues_list("statsmodels/statsmodels",
milestone=milestone_id,
state='closed',
auth=True,
)
should_backport = []
merged_dates = []
for issue in issues:
if not is_pull_request(issue):
continue
pr = get_pull_request("statsmodels/statsmodels", issue['number'],
auth=True)
if not pr['merged']:
print ("Marked PR closed without merge: %i" % pr['number'])
continue
if pr['number'] not in should_backport:
merged_dates.append(pr['merged_at'])
should_backport.append(pr['number'])
return Series(merged_dates, index=should_backport)
if __name__ == '__main__':
if len(sys.argv) < 2:
print(__doc__)
sys.exit(1)
if len(sys.argv) < 3:
branch = sys.argv[1]
already = already_backported(branch)
#NOTE: change this to the label you've used for marking a backport
should = should_backport(milestone="0.5.1")
print ("The following PRs should be backported:")
to_backport = []
if already:
should = should.ix[set(should.index).difference(already)]
should.sort()
for pr, date in should.iteritems():
print (pr)
sys.exit(0)
sys.exit(backport_pr(sys.argv[1], int(sys.argv[2])))
| bsd-3-clause |
RachitKansal/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
duolinwang/MusiteDeep | MusiteDeep/train_general.py | 1 | 4521 | import sys
import os
import pandas as pd
import numpy as np
import argparse
def main():
parser=argparse.ArgumentParser(description='MusiteDeep custom training tool for general PTM prediction.')
parser.add_argument('-input', dest='inputfile', type=str, help='training data in fasta format. Sites followed by "#" are positive sites for a specific PTM prediction.', required=True)
parser.add_argument('-output-prefix', dest='outputprefix', type=str, help='prefix of the output files (model and parameter files).', required=True)
    parser.add_argument('-residue-types', dest='residues', type=str, help='Residue types that this model focuses on. For multiple residues, separate each with \',\'. \n\
    Note: all the residues specified by this parameter will be trained in one model.', required=True)
    parser.add_argument('-valinput', dest='valfile', type=str, help='validation data in fasta format if any. It will randomly select 10 percent of samples from the training data as a validation data set, if no validation file is provided.', required=False,default=None)
    parser.add_argument('-nclass', dest='nclass', type=int, help='number of classifiers to be trained at one time. [Default:5]', required=False, default=5)
    parser.add_argument('-window', dest='window', type=int, help='window size: the number of amino acids on the left or right side adjacent to a potential PTM site. 2*\'window size\'+1 amino acids will be extracted for one potential fragment. [Default:16]', required=False, default=16)
    parser.add_argument('-maxneg', dest='maxneg', type=int, help='maximum iterations for each classifier, which controls the maximum copy number of the negative data; each copy has the same size as the positive data. [Default: 50]', required=False, default=50)
    parser.add_argument('-nb_epoch', dest='nb_epoch', type=int, help='number of epochs for one bootstrap step. It is ignored if earlystop is set.', required=False, default=None)
    parser.add_argument('-earlystop', dest='earlystop', type=int, help='training for one bootstrap step will be stopped after \'earlystop\' epochs with no improvement. [Default: 20]', required=False, default=20)
    parser.add_argument('-inputweights', dest='inputweights', type=str, help='Initial weights saved in an HDF5 file.', required=False, default=None)
    parser.add_argument('-backupweights', dest='backupweights', type=str, help='Path of an HDF5 file in which the intermediate weights are backed up.', required=False, default=None)
parser.add_argument('-transferlayer', dest='transferlayer', type=int, help='Set the last \'transferlayer\' number of layers to be randomly initialized.', required=False, default=1)
args = parser.parse_args()
inputfile=args.inputfile;
valfile=args.valfile;
outputprefix=args.outputprefix;
nclass=args.nclass;
window=args.window;
maxneg=args.maxneg;
np_epoch2=args.nb_epoch;
earlystop=args.earlystop;
inputweights=args.inputweights;
backupweights=args.backupweights;
transferlayer=args.transferlayer
residues=args.residues.split(",")
outputmodel=outputprefix+str("_HDF5model");
outputparameter=outputprefix+str("_parameters");
try:
output = open(outputparameter, 'w')
except IOError:
print 'cannot write to ' + outputparameter+ "!\n";
exit()
else:
print >> output, "%d\t%d\t%s\tgeneral" % (nclass,window,args.residues)
from methods.Bootstrapping_allneg_continue_val import bootStrapping_allneg_continue_val
from methods.EXtractfragment_sort import extractFragforTraining
trainfrag=extractFragforTraining(inputfile,window,'-',focus=residues)
if(valfile is not None):
valfrag=extractFragforTraining(valfile,window,'-',focus= residues)
else:
valfrag=None;
for bt in range(nclass):
models=bootStrapping_allneg_continue_val(trainfrag.as_matrix(),valfile=valfrag,
srate=1,nb_epoch1=1,nb_epoch2=np_epoch2,earlystop=earlystop,maxneg=maxneg,
outputweights=backupweights,
inputweights=inputweights,
transferlayer=transferlayer)
models.save_weights(outputmodel+'_class'+str(bt),overwrite=True)
if __name__ == "__main__":
main()
| gpl-2.0 |
rmhyman/DataScience | Lesson1/IntroToPandas.py | 1 | 1976 | import pandas as pd
'''
The following code is to help you play with the concept of Series in Pandas.
You can think of a Series as a one-dimensional object that is similar to
an array, list, or column in a database. By default, it will assign an
index label to each item in the Series ranging from 0 to N, where N is
the number of items in the Series minus one.
Please feel free to play around with the concept of Series and see what it does
*This playground is inspired by Greg Reda's post on Intro to Pandas Data Structures:
http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/
'''
# Change False to True to create a Series object
if True:
series = pd.Series(['Dave', 'Cheng-Han', 'Udacity', 42, -1789710578])
print series
'''
You can also manually assign indices to the items in the Series when
creating the series
'''
# Change False to True to see custom index in action
if False:
series = pd.Series(['Dave', 'Cheng-Han', 359, 9001],
index=['Instructor', 'Curriculum Manager',
'Course Number', 'Power Level'])
print series
'''
You can use index to select specific items from the Series
'''
# Change False to True to see Series indexing in action
if False:
series = pd.Series(['Dave', 'Cheng-Han', 359, 9001],
index=['Instructor', 'Curriculum Manager',
'Course Number', 'Power Level'])
print series['Instructor']
print ""
print series[['Instructor', 'Curriculum Manager', 'Course Number']]
'''
You can also use boolean operators to select specific items from the Series
'''
# Change False to True to see boolean indexing in action
if True:
cuteness = pd.Series([1, 2, 3, 4, 5], index=['Cockroach', 'Fish', 'Mini Pig',
'Puppy', 'Kitten'])
print cuteness > 3
print ""
print cuteness[cuteness > 3]
| mit |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/__init__.py | 27 | 3086 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.18.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'exceptions', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| unlicense |
WarrenWeckesser/scipy | scipy/interpolate/fitpack.py | 16 | 26807 | __all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
# These are in the API for fitpack even if not used in fitpack.py itself.
from ._fitpack_impl import bisplrep, bisplev, dblint
from . import _fitpack_impl as _impl
from ._bsplines import BSpline
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-D curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-D space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
        A value of nest=m+k+1 is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
    The number of coefficients in the `c` array is ``k+1`` less than the number
of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
the array of coefficients to have the same length as the array of knots.
These additional coefficients are ignored by evaluation routines, `splev`
and `BSpline`.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Generate a discretization of a limacon curve in the polar coordinates:
>>> phi = np.linspace(0, 2.*np.pi, 40)
>>> r = 0.5 + np.cos(phi) # polar coords
>>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
And interpolate:
>>> from scipy.interpolate import splprep, splev
>>> tck, u = splprep([x, y], s=0)
>>> new_points = splev(u, tck)
Notice that (i) we force interpolation by using `s=0`,
(ii) the parameterization, ``u``, is generated automatically.
Now plot the result:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x, y, 'ro')
>>> ax.plot(new_points[0], new_points[1], 'r-')
>>> plt.show()
"""
res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
quiet)
return res
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of a 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The degree of the spline fit. It is recommended to use cubic splines.
Even values of k should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored and used internally)
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives. Uses the
FORTRAN routine ``curfit`` from FITPACK.
The user is responsible for assuring that the values of `x` are unique.
Otherwise, `splrep` will not return sensible results.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
This routine zero-pads the coefficients array ``c`` to have the same length
as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
`splprep`, which does not zero-pad the coefficients.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
You can interpolate 1-D points with a B-spline curve.
Further examples are given in
:ref:`in the tutorial <tutorial-interpolate_splXXX>`.
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> spl = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, spl)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
return res
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : 3-tuple or a BSpline object
If a tuple, then it should be a sequence of length 3 returned by
`splrep` or `splprep` containing the knots, coefficients, and degree
of the spline. (Also see Notes.)
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k, the degree of the spline).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in `x`. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in an N-D space.
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using `BSpline` objects.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
BSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
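    As a quick illustrative sketch (output omitted; the values are close to
    ``sin(pi/2)`` and its derivative ``cos(pi/2)``):
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, splev
    >>> x = np.linspace(0, 10, 70)
    >>> tck = splrep(x, np.sin(x))
    >>> y = splev(np.pi / 2, tck)          # spline value at pi/2
    >>> dy = splev(np.pi / 2, tck, der=1)  # first derivative at pi/2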
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
"not recommended. Use BSpline.__call__(x) instead.")
warnings.warn(mesg, DeprecationWarning)
# remap the out-of-bounds behavior
try:
extrapolate = {0: True, }[ext]
except KeyError as e:
raise ValueError("Extrapolation mode %s is not supported "
"by BSpline." % ext) from e
return tck(x, der, extrapolate=extrapolate)
else:
return _impl.splev(x, tck, der, ext)
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple or a BSpline instance
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
(Only returned if `full_output` is non-zero)
Notes
-----
`splint` silently assumes that the spline function is zero outside the data
interval (`a`, `b`).
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
BSpline
References
----------
    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
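    A short sketch (the result should be close to the exact integral of
    ``sin`` over ``[0, pi]``, which is 2):
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, splint
    >>> x = np.linspace(0, np.pi, 50)
    >>> tck = splrep(x, np.sin(x))
    >>> integral = splint(0, np.pi, tck)   # approximately 2.0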
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
"not recommended. Use BSpline.integrate() instead.")
warnings.warn(mesg, DeprecationWarning)
if full_output != 0:
mesg = ("full_output = %s is not supported. Proceeding as if "
"full_output = 0" % full_output)
return tck.integrate(a, b, extrapolate=False)
else:
return _impl.splint(a, b, tck, full_output)
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple or a BSpline object
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline.
The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
    See Also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
BSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
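    A rough sketch (the reported roots are approximate and depend on the fit):
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, sproot
    >>> x = np.linspace(0, 10, 70)
    >>> tck = splrep(x, np.cos(x))
    >>> roots = sproot(tck)   # roughly [pi/2, 3*pi/2, 5*pi/2]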
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
"not recommended.")
warnings.warn(mesg, DeprecationWarning)
t, c, k = tck.tck
# _impl.sproot expects the interpolation axis to be last, so roll it.
# NB: This transpose is a no-op if c is 1D.
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
return _impl.sproot((t, c, k), mest)
else:
return _impl.sproot(tck, mest)
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple ``(t, c, k)``, containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
BSpline
References
----------
.. [1] C. de Boor: On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] M. G. Cox : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] P. Dierckx : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
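    A minimal sketch (the returned array holds the spline value and its first
    k derivatives at the requested point):
    >>> import numpy as np
    >>> from scipy.interpolate import splrep, spalde
    >>> x = np.linspace(0, 10, 70)
    >>> tck = splrep(x, np.sin(x))
    >>> d = spalde(5.0, tck)   # [f(5), f'(5), f''(5), f'''(5)], approximately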
"""
if isinstance(tck, BSpline):
raise TypeError("spalde does not accept BSpline instances.")
else:
return _impl.spalde(x, tck)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : a `BSpline` instance or a tuple
If tuple, then it is expected to be a tuple (t,c,k) containing
the vector of knots, the B-spline coefficients, and the degree of
the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
BSpline instance or a tuple
A new B-spline with knots t, coefficients c, and degree k.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
Based on algorithms from [1]_ and [2]_.
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
    .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs on
           Numerical Analysis, Oxford University Press, 1993.
Examples
--------
You can insert knots into a B-spline.
>>> from scipy.interpolate import splrep, insert
>>> x = np.linspace(0, 10, 5)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> tck[0]
array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
A knot is inserted:
>>> tck_inserted = insert(3, tck)
>>> tck_inserted[0]
array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
Some knots are inserted:
>>> tck_inserted2 = insert(8, tck, m=3)
>>> tck_inserted2[0]
array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
# FITPACK expects the interpolation axis to be last, so roll it over
# NB: if c array is 1D, transposes are no-ops
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
# and roll the last axis back
c_ = np.asarray(c_)
c_ = c_.transpose((sh[-1],) + sh[:-1])
return BSpline(t_, c_, k_)
else:
return _impl.insert(x, tck, m, per)
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
`BSpline` instance or tuple
Spline of order k2=k-n representing the derivative
of the input spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
BSpline
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
if isinstance(tck, BSpline):
return tck.derivative(n)
else:
return _impl.splder(tck, n)
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
BSpline instance or a tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
See Also
--------
splder, splev, spalde
BSpline
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if isinstance(tck, BSpline):
return tck.antiderivative(n)
else:
return _impl.splantider(tck, n)
| bsd-3-clause |
jeffninghan/tracker | OCR_test/ocr_test.py | 1 | 1285 | import numpy as np
import cv2
from matplotlib import pyplot as plt
# test algorithm to recognize digits using kNN
# source: http://docs.opencv.org/trunk/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.html
img = cv2.imread('../data/digits.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Now we split the image into 5000 cells, each 20x20 in size
cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]
# Make it into a Numpy array. Its size will be (50,100,20,20)
x = np.array(cells)
# Now we prepare train_data and test_data.
train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400)
test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400)
# Create labels for train and test data
k = np.arange(10)
train_labels = np.repeat(k,250)[:,np.newaxis]
test_labels = train_labels.copy()
# Initiate kNN, train the data, then test it with test data for k=1
print "Training using kNN..."
knn = cv2.KNearest()
knn.train(train,train_labels)
ret,result,neighbours,dist = knn.find_nearest(test,k=5)
# Now we check the accuracy of classification
# For that, compare the result with test_labels and check which are wrong
matches = result==test_labels
correct = np.count_nonzero(matches)
accuracy = correct*100.0/result.size
print "Accuracy:", accuracy | mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/sphinxext/plot_directive.py | 1 | 28321 | """
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
``:context:close-figs`` keeps the context but closes previous figures
before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen. When passing from
the command line through sphinx_build the list should be passed as
suffix:dpi,suffix:dpi, ....
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
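For example, a project's ``conf.py`` might set a few of these options (the
values below are purely illustrative)::
    plot_include_source = True
    plot_html_show_source_link = False
    plot_formats = [('png', 100), ('hires.png', 200), ('pdf', 200)]
    plot_rcparams = {'font.size': 9}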
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
import warnings
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter("error", UserWarning)
matplotlib.use('Agg')
except UserWarning:
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
else:
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in six.iteritems(document.nametypes):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_html_show_source_link', True, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect(str('doctree-read'), mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
def remove_coding(text):
"""
Remove the coding comment, which six.exec_ doesn't like.
"""
sub_re = re.compile("^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
if six.PY2:
pwd = os.getcwdu()
else:
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
raise OSError(str(err) + '\n`plot_working_directory` option in'
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if six.PY3:
sys.stdout = io.StringIO()
else:
sys.stdout = cStringIO.StringIO()
# Assign a do-nothing print function to the namespace. There
# doesn't seem to be any other way to provide a way to (not) print
# that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
six.exec_(six.text_type("import numpy as np\n" +
"from matplotlib import pyplot as plt\n"), ns)
else:
six.exec_(six.text_type(setup.config.plot_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
six.exec_("__name__ = '__main__'", ns)
code = remove_coding(code)
six.exec_(code, ns)
if function_name is not None:
six.exec_(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False,
close_figs=False):
"""
Run a pyplot script and save the low and high res PNGs and a PDF
in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, six.string_types):
        # In Sphinx < 1.3, plot_formats is a string; split on ',' to mimic
        # Sphinx 1.3 and later, which always returns a list.
plot_formats = plot_formats.split(',')
for fmt in plot_formats:
if isinstance(fmt, six.string_types):
if ':' in fmt:
suffix,dpi = fmt.split(':')
formats.append((str(suffix), int(dpi)))
else:
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
if context_reset:
clear_state(config.plot_rcparams)
plot_context.clear()
close_figs = not context or close_figs
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close_figs)
elif close_figs:
plt.close('all')
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except Exception as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
def run(arguments, content, options, state_machine, state, lineno):
# The user may provide a filename *or* Python code content, but not both
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
options.setdefault('include-source', config.plot_include_source)
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir) # no problem here for me, but just use built-ins
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
try:
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
except ValueError:
# on Windows, relpath raises ValueError when path and start are on
# different mounts/drives
build_dir_link = build_dir
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code,
source_file_name,
build_dir,
output_base,
keep_context,
function_name,
config,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs')
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.plot_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and not nofigs,
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
| mit |
iemejia/beam | sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py | 5 | 4390 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set.
We use the quadratic polynomial f(z) = z*z + c, with c = -.62772 +.42193i
"""
# pytype: skip-file
import argparse
import apache_beam as beam
from apache_beam.io import WriteToText
def from_pixel(x, y, n):
"""Converts a NxN pixel position to a (-1..1, -1..1) complex number."""
return complex(2.0 * x / n - 1.0, 2.0 * y / n - 1.0)
def get_julia_set_point_color(element, c, n, max_iterations):
"""Given an pixel, convert it into a point in our julia set."""
x, y = element
z = from_pixel(x, y, n)
for i in range(max_iterations):
if z.real * z.real + z.imag * z.imag > 2.0:
break
z = z * z + c
return x, y, i # pylint: disable=undefined-loop-variable
def generate_julia_set_colors(pipeline, c, n, max_iterations):
"""Compute julia set coordinates for each point in our set."""
def point_set(n):
for x in range(n):
for y in range(n):
yield (x, y)
julia_set_colors = (
pipeline
| 'add points' >> beam.Create(point_set(n))
| beam.Map(get_julia_set_point_color, c, n, max_iterations))
return julia_set_colors
def generate_julia_set_visualization(data, n, max_iterations):
"""Generate the pixel matrix for rendering the julia set as an image."""
import numpy as np # pylint: disable=wrong-import-order, wrong-import-position
colors = []
for r in range(0, 256, 16):
for g in range(0, 256, 16):
for b in range(0, 256, 16):
colors.append((r, g, b))
xy = np.zeros((n, n, 3), dtype=np.uint8)
for x, y, iteration in data:
xy[x, y] = colors[iteration * len(colors) // max_iterations]
return xy
def save_julia_set_visualization(out_file, image_array):
"""Save the fractal image of our julia set as a png."""
from matplotlib import pyplot as plt # pylint: disable=wrong-import-order, wrong-import-position
plt.imsave(out_file, image_array, format='png')
def run(argv=None): # pylint: disable=missing-docstring
parser = argparse.ArgumentParser()
parser.add_argument(
'--grid_size',
dest='grid_size',
default=1000,
help='Size of the NxN matrix')
parser.add_argument(
'--coordinate_output',
dest='coordinate_output',
required=True,
help='Output file to write the color coordinates of the image to.')
parser.add_argument(
'--image_output',
dest='image_output',
default=None,
help='Output file to write the resulting image to.')
known_args, pipeline_args = parser.parse_known_args(argv)
with beam.Pipeline(argv=pipeline_args) as p:
n = int(known_args.grid_size)
coordinates = generate_julia_set_colors(p, complex(-.62772, .42193), n, 100)
def x_coord_key(x_y_i):
(x, y, i) = x_y_i
return (x, (x, y, i))
# Group each coordinate triplet by its x value, then write the coordinates
# to the output file with an x-coordinate grouping per line.
# pylint: disable=expression-not-assigned
(
coordinates
| 'x coord key' >> beam.Map(x_coord_key)
| 'x coord' >> beam.GroupByKey()
| 'format' >> beam.Map(
lambda k_coords: ' '.join('(%s, %s, %s)' % c for c in k_coords[1]))
| WriteToText(known_args.coordinate_output))
# Optionally render the image and save it to a file.
# TODO(silviuc): Add this functionality.
# if p.options.image_output is not None:
# julia_set_image = generate_julia_set_visualization(
# file_with_coordinates, n, 100)
# save_julia_set_visualization(p.options.image_output, julia_set_image)
| apache-2.0 |
hlin117/statsmodels | examples/python/regression_plots.py | 33 | 9585 |
## Regression Plots
from __future__ import print_function
from statsmodels.compat import lzip
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.formula.api import ols
### Duncan's Prestige Dataset
#### Load the Data
# We can use a utility function to load any R dataset available from the great <a href="http://vincentarelbundock.github.com/Rdatasets/">Rdatasets package</a>.
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
prestige.head()
prestige_model = ols("prestige ~ income + education", data=prestige).fit()
print(prestige_model.summary())
#### Influence plots
# Influence plots show the (externally) studentized residuals vs. the leverage of each observation as measured by the hat matrix.
#
# Externally studentized residuals are residuals that are scaled by their standard deviation where
#
# $$var(\hat{\epsilon}_i)=\hat{\sigma}^2_i(1-h_{ii})$$
#
# with
#
# $$\hat{\sigma}^2_i=\frac{1}{n - p - 1}\sum_{j}^{n}\hat{\epsilon}^2_j\;\;\;\forall \;\;\; j \neq i$$
#
# $n$ is the number of observations and $p$ is the number of regressors. $h_{ii}$ is the $i$-th diagonal element of the hat matrix
#
# $$H=X(X^{\;\prime}X)^{-1}X^{\;\prime}$$
#
# The influence of each point can be visualized by the criterion keyword argument. Options are Cook's distance and DFFITS, two measures of influence.
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.influence_plot(prestige_model, ax=ax, criterion="cooks")
# As you can see there are a few worrisome observations. Both contractor and reporter have low leverage but a large residual. <br />
# RR.engineer has small residual and large leverage. Conductor and minister have both high leverage and large residuals, and, <br />
# therefore, large influence.
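# The same leverage and studentized-residual values can also be pulled out
# numerically. This is a sketch relying on the OLSInfluence object returned by
# get_influence(); it is not used elsewhere in this example.
influence = prestige_model.get_influence()
print(influence.hat_matrix_diag[:5])             # leverage h_ii
print(influence.resid_studentized_external[:5])  # externally studentized residuals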
#### Partial Regression Plots
# Since we are doing multivariate regressions, we cannot just look at individual bivariate plots to discern relationships. <br />
# Instead, we want to look at the relationship of the dependent variable and independent variables conditional on the other <br />
# independent variables. We can do this through using partial regression plots, otherwise known as added variable plots. <br />
#
# In a partial regression plot, to discern the relationship between the response variable and the $k$-th variable, we compute <br />
# the residuals by regressing the response variable versus the independent variables excluding $X_k$. We can denote this by <br />
# $X_{\sim k}$. We then compute the residuals by regressing $X_k$ on $X_{\sim k}$. The partial regression plot is the plot <br />
# of the former versus the latter residuals. <br />
#
# The notable points of this plot are that the fitted line has slope $\beta_k$ and intercept zero. The residuals of this plot <br />
# are the same as those of the least squares fit of the original model with full $X$. You can discern the effects of the <br />
# individual data values on the estimation of a coefficient easily. If obs_labels is True, then these points are annotated <br />
# with their observation label. You can also see the violation of underlying assumptions such as homoskedasticity and <br />
# linearity.
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("prestige", "income", ["income", "education"], data=prestige, ax=ax)
ax = fig.axes[0]
ax.set_xlim(-2e-15, 1e-14)
ax.set_ylim(-25, 30);
fig, ax = plt.subplots(figsize=(12,14))
fig = sm.graphics.plot_partregress("prestige", "income", ["education"], data=prestige, ax=ax)
# As you can see the partial regression plot confirms the influence of conductor, minister, and RR.engineer on the partial relationship between income and prestige. The cases greatly decrease the effect of income on prestige. Dropping these cases confirms this.
subset = ~prestige.index.isin(["conductor", "RR.engineer", "minister"])
prestige_model2 = ols("prestige ~ income + education", data=prestige, subset=subset).fit()
print(prestige_model2.summary())
# For a quick check of all the regressors, you can use plot_partregress_grid. These plots will not label the <br />
# points, but you can use them to identify problems and then use plot_partregress to get more information.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(prestige_model, fig=fig)
#### Component-Component plus Residual (CCPR) Plots
# The CCPR plot provides a way to judge the effect of one regressor on the <br />
# response variable by taking into account the effects of the other <br />
# independent variables. The partial residuals plot is defined as <br />
# $\text{Residuals} + B_iX_i \text{ }\text{ }$ versus $X_i$. The component adds $B_iX_i$ versus <br />
# $X_i$ to show where the fitted line would lie. Care should be taken if $X_i$ <br />
# is highly correlated with any of the other independent variables. If this <br />
# is the case, the variance evident in the plot will be an underestimate of <br />
# the true variance.
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_ccpr(prestige_model, "education", ax=ax)
# As you can see the relationship between the variation in prestige explained by education conditional on income seems to be linear, though you can see there are some observations that are exerting considerable influence on the relationship. We can quickly look at more than one variable by using plot_ccpr_grid.
fig = plt.figure(figsize=(12, 8))
fig = sm.graphics.plot_ccpr_grid(prestige_model, fig=fig)
#### Regression Plots
# The plot_regress_exog function is a convenience function that gives a 2x2 plot containing the dependent variable and fitted values with confidence intervals vs. the independent variable chosen, the residuals of the model vs. the chosen independent variable, a partial regression plot, and a CCPR plot. This function can be used for quickly checking modeling assumptions with respect to a single regressor.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_regress_exog(prestige_model, "education", fig=fig)
#### Fit Plot
# The plot_fit function plots the fitted values versus a chosen independent variable. It includes prediction confidence intervals and optionally plots the true dependent variable.
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_fit(prestige_model, "education", ax=ax)
### Statewide Crime 2009 Dataset
# Compare the following to http://www.ats.ucla.edu/stat/stata/webbooks/reg/chapter4/statareg_self_assessment_answers4.htm
#
# Though the data here is not the same as in that example. You could run that example by uncommenting the necessary cells below.
#dta = pd.read_csv("http://www.stat.ufl.edu/~aa/social/csv_files/statewide-crime-2.csv")
#dta = dta.set_index("State", inplace=True).dropna()
#dta.rename(columns={"VR" : "crime",
# "MR" : "murder",
# "M" : "pctmetro",
# "W" : "pctwhite",
# "H" : "pcths",
# "P" : "poverty",
# "S" : "single"
# }, inplace=True)
#
#crime_model = ols("murder ~ pctmetro + poverty + pcths + single", data=dta).fit()
dta = sm.datasets.statecrime.load_pandas().data
crime_model = ols("murder ~ urban + poverty + hs_grad + single", data=dta).fit()
print(crime_model.summary())
#### Partial Regression Plots
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(crime_model, fig=fig)
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("murder", "hs_grad", ["urban", "poverty", "single"], ax=ax, data=dta)
#### Leverage-Resid<sup>2</sup> Plot
# Closely related to the influence_plot is the leverage-resid<sup>2</sup> plot.
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.plot_leverage_resid2(crime_model, ax=ax)
#### Influence Plot
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.influence_plot(crime_model, ax=ax)
#### Using robust regression to correct for outliers.
# Part of the problem here in recreating the Stata results is that M-estimators are not robust to leverage points. MM-estimators should do better with this example.
from statsmodels.formula.api import rlm
rob_crime_model = rlm("murder ~ urban + poverty + hs_grad + single", data=dta,
M=sm.robust.norms.TukeyBiweight(3)).fit(conv="weights")
print(rob_crime_model.summary())
#rob_crime_model = rlm("murder ~ pctmetro + poverty + pcths + single", data=dta, M=sm.robust.norms.TukeyBiweight()).fit(conv="weights")
#print(rob_crime_model.summary())
# There are not yet influence diagnostics as part of RLM, but we can recreate them. (This depends on the status of [issue #888](https://github.com/statsmodels/statsmodels/issues/808))
weights = rob_crime_model.weights
idx = weights > 0
X = rob_crime_model.model.exog[idx]
ww = weights[idx] / weights[idx].mean()
hat_matrix_diag = ww*(X*np.linalg.pinv(X).T).sum(1)
resid = rob_crime_model.resid
resid2 = resid**2
resid2 /= resid2.sum()
nobs = int(idx.sum())
hm = hat_matrix_diag.mean()
rm = resid2.mean()
from statsmodels.graphics import utils
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(resid2[idx], hat_matrix_diag, 'o')
ax = utils.annotate_axes(range(nobs), labels=rob_crime_model.model.data.row_labels[idx],
points=lzip(resid2[idx], hat_matrix_diag), offset_points=[(-5,5)]*nobs,
size="large", ax=ax)
ax.set_xlabel("resid2")
ax.set_ylabel("leverage")
ylim = ax.get_ylim()
ax.vlines(rm, *ylim)
xlim = ax.get_xlim()
ax.hlines(hm, *xlim)
ax.margins(0,0)
| bsd-3-clause |
etkirsch/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
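# With only the uncommented entries above, the grid has 3 * 2 * 2 * 2 = 24
# parameter combinations, and each combination is refit once per
# cross-validation fold, so runtime grows combinatorially as more entries are
# uncommented.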
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
karthiks1995/dejavu | dejavu/fingerprint.py | 15 | 5828 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
iterate_structure, binary_erosion)
import hashlib
from operator import itemgetter
IDX_FREQ_I = 0
IDX_TIME_J = 1
######################################################################
# Sampling rate, related to the Nyquist conditions, which affects
# the range of frequencies we can detect.
DEFAULT_FS = 44100
######################################################################
# Size of the FFT window, affects frequency granularity
DEFAULT_WINDOW_SIZE = 4096
######################################################################
# Ratio by which each sequential window overlaps the last and the
# next window. Higher overlap will allow a higher granularity of offset
# matching, but potentially more fingerprints.
DEFAULT_OVERLAP_RATIO = 0.5
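# With the defaults above, consecutive windows advance by
# 4096 * (1 - 0.5) = 2048 samples, i.e. roughly 46 ms at 44100 Hz.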
######################################################################
# Degree to which a fingerprint can be paired with its neighbors --
# higher will cause more fingerprints, but potentially better accuracy.
DEFAULT_FAN_VALUE = 15
######################################################################
# Minimum amplitude in spectrogram in order to be considered a peak.
# This can be raised to reduce number of fingerprints, but can negatively
# affect accuracy.
DEFAULT_AMP_MIN = 10
######################################################################
# Number of cells around an amplitude peak in the spectrogram in order
# for Dejavu to consider it a spectral peak. Higher values mean fewer
# fingerprints and faster matching, but can potentially affect accuracy.
PEAK_NEIGHBORHOOD_SIZE = 20
######################################################################
# Thresholds on how close or far fingerprints can be in time in order
# to be paired as a fingerprint. If your max is too low, higher values of
# DEFAULT_FAN_VALUE may not perform as expected.
MIN_HASH_TIME_DELTA = 0
MAX_HASH_TIME_DELTA = 200
######################################################################
# If True, will sort peaks temporally for fingerprinting;
# not sorting will cut down the number of fingerprints, but potentially
# affect performance.
PEAK_SORT = True
######################################################################
# Number of bits to throw away from the front of the SHA1 hash in the
# fingerprint calculation. The more you throw away, the less storage, but
# potentially higher collisions and misclassifications when identifying songs.
FINGERPRINT_REDUCTION = 20
def fingerprint(channel_samples, Fs=DEFAULT_FS,
wsize=DEFAULT_WINDOW_SIZE,
wratio=DEFAULT_OVERLAP_RATIO,
fan_value=DEFAULT_FAN_VALUE,
amp_min=DEFAULT_AMP_MIN):
"""
FFT the channel, log transform output, find local maxima, then return
    locality-sensitive hashes.
"""
# FFT the signal and extract frequency components
arr2D = mlab.specgram(
channel_samples,
NFFT=wsize,
Fs=Fs,
window=mlab.window_hanning,
noverlap=int(wsize * wratio))[0]
# apply log transform since specgram() returns linear array
arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0 # replace infs with zeros
# find local maxima
local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)
# return hashes
return generate_hashes(local_maxima, fan_value=fan_value)
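# Illustrative usage (a sketch, not part of this module): given mono PCM
# samples read with, e.g., scipy.io.wavfile (hypothetical file name below),
#
#   Fs, samples = scipy.io.wavfile.read("song.wav")
#   hashes = list(fingerprint(samples, Fs=Fs))
#
# each element of `hashes` is a (sha1_prefix, time_offset) pair as produced by
# generate_hashes() below.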
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    # find local maxima using our filter shape
local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
background = (arr2D == 0)
eroded_background = binary_erosion(background, structure=neighborhood,
border_value=1)
# Boolean mask of arr2D with True at peaks
detected_peaks = local_max - eroded_background
# extract peaks
amps = arr2D[detected_peaks]
j, i = np.where(detected_peaks)
# filter peaks
amps = amps.flatten()
peaks = zip(i, j, amps)
peaks_filtered = [x for x in peaks if x[2] > amp_min] # freq, time, amp
# get indices for frequency and time
frequency_idx = [x[1] for x in peaks_filtered]
time_idx = [x[0] for x in peaks_filtered]
if plot:
# scatter of the peaks
fig, ax = plt.subplots()
ax.imshow(arr2D)
ax.scatter(time_idx, frequency_idx)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title("Spectrogram")
plt.gca().invert_yaxis()
plt.show()
return zip(frequency_idx, time_idx)
def generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):
"""
Hash list structure:
sha1_hash[0:20] time_offset
[(e05b341a9b77a51fd26, 32), ... ]
"""
if PEAK_SORT:
peaks.sort(key=itemgetter(1))
for i in range(len(peaks)):
for j in range(1, fan_value):
if (i + j) < len(peaks):
freq1 = peaks[i][IDX_FREQ_I]
freq2 = peaks[i + j][IDX_FREQ_I]
t1 = peaks[i][IDX_TIME_J]
t2 = peaks[i + j][IDX_TIME_J]
t_delta = t2 - t1
if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:
h = hashlib.sha1(
"%s|%s|%s" % (str(freq1), str(freq2), str(t_delta)))
yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)
| mit |
sunyihuan326/DeltaLab | shuwei_fengge/practice_one/model/tt.py | 1 | 3958 | # coding:utf-8
'''
Created on 2017/12/8.
@author: chk01
'''
import scipy.io as scio
# data = scio.loadmat(file)
# from sklearn.model_selection import train_test_split
#
# print(data['X'].shape)
# print(data['Y'].shape)
# X_train, X_test, Y_train, Y_test = train_test_split(data['X'], data['Y'], test_size=0.2)
# print(X_train.shape)
# print(Y_train.shape)
# print(X_test.shape)
# print(Y_test.shape)
import numpy as np
import scipy.io as scio
import tensorflow as tf
from practice_one.model.utils import *
from tensorflow.contrib.factorization import KMeans
from sklearn.ensemble import AdaBoostClassifier
# print(np.e)
# print(-np.log(np.e / (np.e + 8)))
# ZL = tf.Variable([[0, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32)
# print(ZL.shape)
# Y = tf.constant([[0, 0, 0, 0, 0, 0, 1, 0, 0]], dtype=tf.float32)
# Y = tf.get_variable(dtype=tf.float32, shape=(1, 2), name='tt',initializer=tf.contrib.layers.xavier_initializer())
# cor_op = tf.argmax(Y, 1)
# pre_op = tf.argmax(ZL, 1)
# cost1 = tf.square(tf.cast(cor_op - pre_op, dtype=tf.float32))
# lost = tf.reduce_mean(
# cost1 + tf.nn.softmax_cross_entropy_with_logits(logits=ZL,
# labels=Y))
# # loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))
# train_op = tf.train.GradientDescentOptimizer(0.1).minimize(lost)
# init = tf.global_variables_initializer()
# with tf.Session() as sess:
# sess.run(init)
# for i in range(30):
# sess.run(train_op)
# print(sess.run(lost))
# print(sess.run(tf.reduce_mean(cost1)))
# print(sess.run(tf.argmax(ZL, 1)))
# 1.37195
# 2.37195
# parameters = scio.loadmat('kmeans_parameters.mat')
# X_train, X_test, Y_train, Y_test = load_data("face_1_channel_sense.mat")
# print(X_test.shape)
# num_features = 28
# num_classes = 3
#
# X = tf.placeholder(tf.float32, shape=[None, num_features])
# Y = tf.placeholder(tf.float32, shape=[None, num_classes])
#
# kmeans = KMeans(inputs=X, num_clusters=300,
# distance_metric='cosine',
# use_mini_batch=True)
#
# (all_scores, cluster_idx, scores, cluster_centers_initialized, cluster_centers_var, init_op,
# train_op) = kmeans.training_graph()
# cluster_idx = cluster_idx[0] # fix for cluster_idx being a tuple
#
# # Initialize the variables (i.e. assign their default value)
# init_vars = tf.global_variables_initializer()
#
# # Start TensorFlow session
# sess = tf.Session()
# sess.run(init_vars, feed_dict={X: X_test})
# sess.run(init_op, feed_dict={X: X_test})
# cl = sess.run(cluster_idx, feed_dict={X: X_train})
# print("cl",cl)
# print(len(cl))
# parameters = scio.loadmat('kmeans_parameters.mat')
# print("parameters",parameters['labels_map'][0])
# labels_map = tf.convert_to_tensor(parameters['labels_map'][0])
#
# # Evaluation ops
# # Lookup: centroid_id -> label
# cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx)
#
# # Test Model
# test_x, test_y = X_test, Y_test
# with sess.as_default():
# cluster_label = cluster_label.eval(feed_dict={X: X_test})
#
# c = 0
# for i in range(len(cluster_label)):
# if abs(cluster_label[i] - np.argmax(Y_train, 1)[i]) > 1:
# c += 1. / len(cluster_label)
# print(c)
# tt = scio.loadmat("tt_cluster_label.mat")
# sense = scio.loadmat("sense_cluster.mat")
# tt = tt["tt"][0]
# se = sense["sense"][0]
# for i in range(len(tt)):
# if tt[i] != se[i]:
# print(i, tt[i], se[i])
# # print('correct_prediction', correct_prediction)
# index = [1, 2, 0, 2, 1, 2]
# indice = [[0, 2, 1, 1, 1], [0, 1, 1, 2, 1]]
# a = tf.one_hot(index, 3, axis=0)
# b = tf.one_hot(indice, 3, axis=1)
# with tf.Session() as sess:
# print(sess.run(a))
# print("b", sess.run(b))
file = "face_1_channel_sense"
X_train, X_test, Y_train, Y_test = load_data(file)
clf = AdaBoostClassifier(n_estimators=100)
Y_train = np.argmax(Y_train, 1)
c = clf.fit(X_train, Y_train)
print(c)
| mit |
timcera/mettoolbox | mettoolbox/pet.py | 1 | 10467 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import warnings
from typing import Optional, Union
import numpy as np
import pandas as pd
import typic
from solarpy import declination
from tstoolbox import tsutils
from . import meteolib, utils
warnings.filterwarnings("ignore")
def _columns(tsd, req_column_list=[], optional_column_list=[]):
if None in req_column_list:
raise ValueError(
tsutils.error_wrapper(
"""
You need to supply the column (name or number, data column numbering
starts at 1) for {0} time-series.
Instead you gave {1}""".format(
len(req_column_list), req_column_list
)
)
)
collect = []
for loopvar in req_column_list + optional_column_list:
try:
nloopvar = int(loopvar) - 1
except TypeError:
nloopvar = loopvar
if nloopvar is None:
collect.append(None)
else:
collect.append(tsd.ix[:, nloopvar])
return collect
def _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
):
columns, column_names = utils._check_temperature_cols(
temp_min_col=temp_min_col,
temp_max_col=temp_max_col,
temp_mean_col=temp_mean_col,
temp_min_required=temp_min_required,
temp_max_required=temp_max_required,
)
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
clean=clean,
)
if source_units is None:
# If "source_units" keyword is None must have source_units in column name.
source_units = []
for units in tsd.columns:
words = units.split(":")
if len(words) >= 2:
source_units.append(words[1])
else:
raise ValueError(
tsutils.error_wrapper(
"""
If "source_units" are not supplied as the second ":" delimited field in the column name
they must be supplied with the "source_units" keyword. """
)
)
else:
source_units = tsutils.make_list(source_units)
if len(source_units) != len(tsd.columns):
raise ValueError(
tsutils.error_wrapper(
"""
The number of "source_units" terms must match the number of temperature columns.
"""
)
)
interim_target_units = ["degC"] * len(tsd.columns)
tsd = tsutils.common_kwds(
tsd,
source_units=source_units,
target_units=interim_target_units,
)
tsd.columns = column_names
tsd = utils._validate_temperatures(tsd, temp_min_col, temp_max_col)
return tsd
def et0_pm(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
source_units=None,
target_units=None,
print_input=False,
tablefmt="csv",
avp=None,
avp_from_tdew=None,
avp_from_twet_tdry=None,
avp_from_rhmin_rh_max=None,
avp_from_rhmax=None,
avp_from_rhmean=None,
avp_from_tmin=None,
lat=None,
):
"""Penman-Monteith evaporation."""
tsd = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts, skiprows=skiprows, names=names, index_type=index_type
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
source_units=source_units,
target_units=target_units,
clean=clean,
)
return tsd
@typic.constrained(ge=-90, le=90)
class FloatLatitude(float):
"""-90 <= float <= 90"""
@typic.al
def hamon(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
k: float = 1,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""hamon"""
temp_min_required = True
temp_max_required = True
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
decl = [declination(i) for i in tsd.index.to_pydatetime()]
w = np.arccos(-np.tan(decl) * np.tan(lat))
es = meteolib.es_calc(tsd.tmean)
N = 24 * w / np.pi
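    # decl is the solar declination and w the half-day (sunset hour) angle in
    # radians from arccos; N is then the maximum possible daylight length in
    # hours, the day-length term of Hamon's formulation.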
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_hamon:mm"])
pe["pet_hamon:mm"] = k * 29.8 * N * es / (273.3 + tsd.tmean)
pe.loc[tsd.tmean <= 0, "pet_hamon:mm"] = 0.0
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
@typic.al
def hargreaves(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units="mm",
print_input=False,
):
"""hargreaves"""
temp_min_required = True
temp_max_required = True
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
newra = utils.radiation(tsd, lat)
tsdiff = tsd.tmax - tsd.tmin
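    # Hargreaves-Samani form: PET = 0.0023 * Ra * sqrt(Tmax - Tmin) * (Tmean + 17.8);
    # the 0.408 factor converts extraterrestrial radiation from MJ m-2 day-1
    # into mm day-1 of evaporated-water equivalent.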
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_hargreaves:mm"])
pe["pet_hargreaves:mm"] = (
0.408
* 0.0023
* newra.ra.values
* np.abs(tsdiff.values) ** 0.5
* (tsd.tmean.values + 17.8)
)
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
@typic.al
def oudin_form(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
k1=100,
k2=5,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""oudin form"""
temp_min_required = False
temp_max_required = False
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
newra = utils.radiation(tsd, lat)
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_oudin:mm"])
    gamma = 2.45  # latent heat of vaporization of water (MJ kg−1)
rho = 1000.0 # density of water (kg m-3)
pe.loc[tsd.tmean > k2, "pet_oudin:mm"] = (
newra.ra / (gamma * rho) * (tsd.tmean + k2) / k1 * 1000
)
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
@typic.al
def allen(
lat: FloatLatitude,
temp_min_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_max_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
temp_mean_col: Optional[Union[tsutils.IntGreaterEqualToOne, str]] = None,
source_units=None,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
):
"""Allen"""
temp_min_required = False
temp_max_required = False
tsd = _preprocess(
input_ts,
temp_min_col,
temp_max_col,
temp_mean_col,
temp_min_required,
temp_max_required,
skiprows,
names,
index_type,
start_date,
end_date,
round_index,
dropna,
clean,
source_units,
)
newra = utils.radiation(tsd, lat)
# Create new dataframe with tsd.index as index in
# order to get all of the time components correct.
pe = pd.DataFrame(0.0, index=tsd.index, columns=["pet_allen:mm"])
pe["pet_allen:mm"] = (
0.408 * 0.0029 * newra.ra * (tsd.tmax - tsd.tmin) ** 0.4 * (tsd.tmean + 20)
)
if target_units != source_units:
pe = tsutils.common_kwds(pe, source_units="mm", target_units=target_units)
return tsutils.return_input(print_input, tsd, pe)
def reference():
"""reference penman-monteith"""
print("reference")
def potential():
"""potential"""
print("potential")
| bsd-3-clause |
krahman/BuildingMachineLearningSystemsWithPython | ch04/build_lda.py | 1 | 2472 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
try:
import nltk.corpus
except ImportError:
print("nltk not found")
print("please install it")
raise
from scipy.spatial import distance
import numpy as np
import string
from gensim import corpora, models, similarities
import sklearn.datasets
import nltk.stem
from collections import defaultdict
english_stemmer = nltk.stem.SnowballStemmer('english')
stopwords = set(nltk.corpus.stopwords.words('english'))
stopwords.update(['from:', 'subject:', 'writes:', 'writes'])
class DirectText(corpora.textcorpus.TextCorpus):
def get_texts(self):
return self.input
def __len__(self):
return len(self.input)
try:
dataset = sklearn.datasets.load_mlcomp("20news-18828", "train",
mlcomp_root='./data')
except:
print("Newsgroup data not found.")
print("Please download from http://mlcomp.org/datasets/379")
print("And expand the zip into the subdirectory data/")
print()
print()
raise
otexts = dataset.data
texts = dataset.data
texts = [t.decode('utf-8', 'ignore') for t in texts]
texts = [t.split() for t in texts]
texts = [map(lambda w: w.lower(), t) for t in texts]
texts = [filter(lambda s: not len(set("+-.?!()>@012345689") & set(s)), t)
for t in texts]
texts = [filter(lambda s: (len(s) > 3) and (s not in stopwords), t)
for t in texts]
texts = [map(english_stemmer.stem, t) for t in texts]
usage = defaultdict(int)
for t in texts:
for w in set(t):
usage[w] += 1
limit = len(texts) / 10
too_common = [w for w in usage if usage[w] > limit]
too_common = set(too_common)
texts = [filter(lambda s: s not in too_common, t) for t in texts]
corpus = DirectText(texts)
dictionary = corpus.dictionary
try:
dictionary['computer']
except:
pass
model = models.ldamodel.LdaModel(
corpus, num_topics=100, id2word=dictionary.id2token)
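# thetas[i] accumulates the topic weights reported for document i; iterating
# over model[c] yields (topic_id, weight) pairs for the bag-of-words document c.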
thetas = np.zeros((len(texts), 100))
for i, c in enumerate(corpus):
for ti, v in model[c]:
thetas[i, ti] += v
distances = distance.squareform(distance.pdist(thetas))
large = distances.max() + 1
for i in xrange(len(distances)):
distances[i, i] = large
print(otexts[1])
print()
print()
print()
print(otexts[distances[1].argmin()])
| mit |
olologin/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
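    # The one-vs-all decision boundary of class c satisfies
    # coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0; solving for x1
    # gives the dashed line drawn below.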
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
ishanic/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
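    # The perplexity of each conditional distribution P_i is exp(H(P_i)), the
    # exponential of its Shannon entropy (natural logarithm), so the mean
    # below should match the requested perplexity.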
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
achabotl/pambox | setup.py | 1 | 3387 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from setuptools import setup
from setuptools.command.test import test as TestCommand
import codecs
import os
import re
import sys
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
long_description = read('README.rst')
def check_dependencies():
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
return
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
# (help on that would be awesome)
try:
import numpy
except ImportError:
raise ImportError("pambox requires numpy")
try:
import scipy
except ImportError:
raise ImportError("pambox requires scipy")
try:
import matplotlib
except ImportError:
raise ImportError("pambox requires matplotlib")
try:
import pandas
except ImportError:
raise ImportError("pambox requires pandas")
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--runslow', 'pambox/tests']
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
if __name__ == '__main__':
import sys
if not (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands', 'egg_info', '--version',
'clean'))):
check_dependencies()
setup(
name='pambox',
description='A Python toolbox for auditory modeling',
author='Alexandre Chabot-Leclerc',
author_email='pambox@alex.alexchabot.net',
version=find_version('pambox', '__init__.py'),
url='https://bitbucket.org/achabotl/pambox',
license='Modified BSD License',
tests_require=['pytest'],
install_requires=[
'six>=1.4.1',
],
cmdclass={'test': PyTest},
long_description=long_description,
packages=['pambox'],
include_package_data=True,
platforms='any',
test_suite='pambox.tests',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
],
extras_require={
'testing': ['pytest']
}
)
| bsd-3-clause |
hmendozap/auto-sklearn | autosklearn/metalearning/metafeatures/plot_metafeatures.py | 1 | 20297 | from __future__ import print_function
import argparse
import cPickle
import itertools
import os
import StringIO
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
try:
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
import sklearn.metrics.pairwise
except:
print("Failed to load TSNE, probably you're using sklearn 0.14.X")
from pyMetaLearn.metalearning.meta_base import MetaBase
import pyMetaLearn.metalearning.create_datasets
import pyMetaLearn.data_repositories.openml.apiconnector
def load_dataset(dataset, dataset_directory):
dataset_dir = os.path.abspath(os.path.join(dataset_directory, dataset))
fh = open(os.path.join(dataset_dir, dataset + ".pkl"))
ds = cPickle.load(fh)
fh.close()
data_frame = ds.convert_arff_structure_to_pandas(ds
.get_unprocessed_files())
class_ = data_frame.keys()[-1]
attributes = data_frame.keys()[0:-1]
X = data_frame[attributes]
Y = data_frame[class_]
return X, Y
def plot_metafeatures(metafeatures_plot_dir, metafeatures, metafeatures_times,
runs, method='pca', seed=1, depth=1, distance='l2'):
"""Project datasets in a 2d space and plot them.
arguments:
* metafeatures_plot_dir: a directory to save the generated plots
* metafeatures: a pandas Dataframe from the MetaBase
* runs: a dictionary of runs from the MetaBase
* method: either pca or t-sne
* seed: only used for t-sne
* depth: if 1, a one-step look-ahead is performed
"""
if type(metafeatures) != pd.DataFrame:
raise ValueError("Argument metafeatures must be of type pd.Dataframe "
"but is %s" % str(type(metafeatures)))
############################################################################
# Write out the datasets and their size as a TEX table
# TODO put this in an own function
dataset_tex = StringIO.StringIO()
dataset_tex.write('\\begin{tabular}{lrrr}\n')
dataset_tex.write('\\textbf{Dataset name} & '
'\\textbf{\#features} & '
'\\textbf{\#patterns} & '
'\\textbf{\#classes} \\\\\n')
num_features = []
num_instances = []
num_classes = []
for dataset in sorted(metafeatures.index):
dataset_tex.write('%s & %d & %d & %d \\\\\n' % (
dataset.replace('larochelle_etal_2007_', '').replace(
'_', '-'),
metafeatures.loc[dataset]['number_of_features'],
metafeatures.loc[dataset]['number_of_instances'],
metafeatures.loc[dataset]['number_of_classes']))
num_features.append(metafeatures.loc[dataset]['number_of_features'])
num_instances.append(metafeatures.loc[dataset]['number_of_instances'])
num_classes.append(metafeatures.loc[dataset]['number_of_classes'])
dataset_tex.write('Minimum & %.1f & %.1f & %.1f \\\\\n' %
(np.min(num_features), np.min(num_instances), np.min(num_classes)))
dataset_tex.write('Maximum & %.1f & %.1f & %.1f \\\\\n' %
(np.max(num_features), np.max(num_instances), np.max(num_classes)))
dataset_tex.write('Mean & %.1f & %.1f & %.1f \\\\\n' %
(np.mean(num_features), np.mean(num_instances), np.mean(num_classes)))
dataset_tex.write('10\\%% quantile & %.1f & %.1f & %.1f \\\\\n' % (
np.percentile(num_features, 10), np.percentile(num_instances, 10),
np.percentile(num_classes, 10)))
dataset_tex.write('90\\%% quantile & %.1f & %.1f & %.1f \\\\\n' % (
np.percentile(num_features, 90), np.percentile(num_instances, 90),
np.percentile(num_classes, 90)))
dataset_tex.write('median & %.1f & %.1f & %.1f \\\\\n' % (
np.percentile(num_features, 50), np.percentile(num_instances, 50),
np.percentile(num_classes, 50)))
dataset_tex.write('\\end{tabular}')
dataset_tex.seek(0)
dataset_tex_output = os.path.join(metafeatures_plot_dir, 'datasets.tex')
with open(dataset_tex_output, 'w') as fh:
fh.write(dataset_tex.getvalue())
############################################################################
# Write out a list of metafeatures, each with the min/max/mean
# calculation time and the min/max/mean value
metafeatures_tex = StringIO.StringIO()
metafeatures_tex.write('\\begin{tabular}{lrrrrrr}\n')
metafeatures_tex.write('\\textbf{Metafeature} & '
'\\textbf{Minimum} & '
'\\textbf{Mean} & '
'\\textbf{Maximum} &'
'\\textbf{Minimum time} &'
'\\textbf{Mean time} &'
'\\textbf{Maximum time} '
'\\\\\n')
for mf_name in sorted(metafeatures.columns):
metafeatures_tex.write('%s & %.2f & %.2f & %.2f & %.2f & %.2f & %.2f \\\\\n'
% (mf_name.replace('_', '-'),
metafeatures.loc[:,mf_name].min(),
metafeatures.loc[:,mf_name].mean(),
metafeatures.loc[:,mf_name].max(),
metafeature_times.loc[:, mf_name].min(),
metafeature_times.loc[:, mf_name].mean(),
metafeature_times.loc[:, mf_name].max()))
metafeatures_tex.write('\\end{tabular}')
metafeatures_tex.seek(0)
metafeatures_tex_output = os.path.join(metafeatures_plot_dir, 'metafeatures.tex')
with open(metafeatures_tex_output, 'w') as fh:
fh.write(metafeatures_tex.getvalue())
# Without this scaling the transformation for visualization purposes is
# useless
metafeatures = metafeatures.copy()
X_min = np.nanmin(metafeatures, axis=0)
X_max = np.nanmax(metafeatures, axis=0)
metafeatures = (metafeatures - X_min) / (X_max - X_min)
# PCA
if method == 'pca':
pca = PCA(2)
transformation = pca.fit_transform(metafeatures.values)
elif method == 't-sne':
if distance == 'l2':
distance_matrix = sklearn.metrics.pairwise.pairwise_distances(
metafeatures.values, metric='l2')
elif distance == 'l1':
distance_matrix = sklearn.metrics.pairwise.pairwise_distances(
metafeatures.values, metric='l1')
elif distance == 'runs':
names_to_indices = dict()
for metafeature in metafeatures.index:
idx = len(names_to_indices)
names_to_indices[metafeature] = idx
X, Y = pyMetaLearn.metalearning.create_datasets\
.create_predict_spearman_rank(metafeatures, runs,
'combination')
# Make a metric matrix out of Y
distance_matrix = np.zeros((metafeatures.shape[0],
metafeatures.shape[0]), dtype=np.float64)
for idx in Y.index:
dataset_names = idx.split("_")
d1 = names_to_indices[dataset_names[0]]
d2 = names_to_indices[dataset_names[1]]
distance_matrix[d1][d2] = Y.loc[idx]
distance_matrix[d2][d1] = Y.loc[idx]
else:
raise NotImplementedError()
# For whatever reason, tsne doesn't accept l1 metric
tsne = TSNE(random_state=seed, perplexity=50, verbose=1)
transformation = tsne.fit_transform(distance_matrix)
# Transform the transformation back to range [0, 1] to ease plotting
transformation_min = np.nanmin(transformation, axis=0)
transformation_max = np.nanmax(transformation, axis=0)
transformation = (transformation - transformation_min) / \
(transformation_max - transformation_min)
print(transformation_min, transformation_max)
#for i, dataset in enumerate(directory_content):
# print dataset, meta_feature_array[i]
fig = plt.figure(dpi=600, figsize=(12, 12))
ax = plt.subplot(111)
# The dataset names must be aligned at the borders of the plot in a way
# the arrows don't cross each other. First, define the different slots
# where the labels will be positioned and then figure out the optimal
# order of the labels
slots = []
# 25 datasets on the top y-axis
slots.extend([(-0.1 + 0.05 * i, 1.1) for i in range(25)])
# 24 datasets on the right x-axis
slots.extend([(1.1, 1.05 - 0.05 * i) for i in range(24)])
# 25 datasets on the bottom y-axis
slots.extend([(-0.1 + 0.05 * i, -0.1) for i in range(25)])
# 24 datasets on the left x-axis
slots.extend([(-0.1, 1.05 - 0.05 * i) for i in range(24)])
# Align the labels on the outer axis
labels_top = []
labels_left = []
labels_right = []
labels_bottom = []
for values in zip(metafeatures.index,
transformation[:, 0], transformation[:, 1]):
label, x, y = values
# Although all plot area goes up to 1.1, 1.1, the range of all the
# points lies inside [0,1]
if x >= y and x < 1.0 - y:
labels_bottom.append((x, label))
elif x >= y and x >= 1.0 - y:
labels_right.append((y, label))
        elif y > x and x <= 1.0 - y:
labels_left.append((y, label))
else:
labels_top.append((x, label))
# Sort the labels according to their alignment
labels_bottom.sort()
labels_left.sort()
labels_left.reverse()
labels_right.sort()
labels_right.reverse()
labels_top.sort()
# Build an index label -> x, y
points = {}
for values in zip(metafeatures.index,
transformation[:, 0], transformation[:, 1]):
label, x, y = values
points[label] = (x, y)
# Find out the final positions...
positions_top = {}
positions_left = {}
positions_right = {}
positions_bottom = {}
# Find the actual positions
for i, values in enumerate(labels_bottom):
y, label = values
margin = 1.2 / len(labels_bottom)
positions_bottom[label] = (-0.05 + i * margin, -0.1,)
for i, values in enumerate(labels_left):
x, label = values
margin = 1.2 / len(labels_left)
positions_left[label] = (-0.1, 1.1 - i * margin)
for i, values in enumerate(labels_top):
y, label = values
margin = 1.2 / len(labels_top)
positions_top[label] = (-0.05 + i * margin, 1.1)
for i, values in enumerate(labels_right):
y, label = values
margin = 1.2 / len(labels_right)
positions_right[label] = (1.1, 1.05 - i * margin)
    # Greedily re-sort the labels if doing so decreases the number of
    # intersections...
    def resort(label_positions, marker_positions, maxdepth=1):
        # TODO: are the inputs dicts or lists?
        # TODO: two-step look-ahead
        def intersect(start1, end1, start2, end2):
            # Check whether the two line segments intersect; for the
            # algorithm see "Computer Graphics" by F. S. Hill
            # If any endpoint is not finite, the segments cannot intersect...
            for v in [start1, start2, end1, end2]:
                if not np.isfinite(v).all():
                    return False    # Obviously there is no intersection

            def perpendicular(d):
                return np.array((-d[1], d[0]))

            d1 = end1 - start1            # denoted b
            d2 = end2 - start2            # denoted d
            d2_1 = start2 - start1        # denoted c
            d1_perp = perpendicular(d1)   # denoted by b_perp
            d2_perp = perpendicular(d2)   # denoted by d_perp
            t = np.dot(d2_1, d2_perp) / np.dot(d1, d2_perp)
            u = - np.dot(d2_1, d1_perp) / np.dot(d2, d1_perp)
            if 0 <= t <= 1 and 0 <= u <= 1:
                return True     # There is an intersection
            else:
                return False    # There is no intersection
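        # number_of_intersections iterates over ordered pairs, so every
        # crossing is counted twice; this is harmless because the counts are
        # only ever compared with each other (and with zero).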
        def number_of_intersections(label_positions, marker_positions):
            num = 0
            for key1, key2 in itertools.permutations(label_positions, r=2):
                s1 = np.array(label_positions[key1])
                e1 = np.array(marker_positions[key1])
                s2 = np.array(label_positions[key2])
                e2 = np.array(marker_positions[key2])
                if intersect(s1, e1, s2, e2):
                    num += 1
            return num
        # Test whether swapping two labels decreases the number of
        # intersections
        # TODO: if this was done with a data structure other than dicts,
        # it could be much faster, because a lot of redundant computation
        # is performed in the second iteration
        def swap(label_positions, marker_positions, depth=0,
                 maxdepth=maxdepth,
                 best_found=sys.maxint):  # Python 2; sys.maxsize on Python 3
            if len(label_positions) <= 1:
                return

            two_step_look_ahead = False
            while True:
                improvement = False
                for key1, key2 in itertools.combinations(label_positions,
                                                         r=2):
                    before = number_of_intersections(label_positions,
                                                     marker_positions)
                    # Swap the two label positions...
                    tmp = label_positions[key1]
                    label_positions[key1] = label_positions[key2]
                    label_positions[key2] = tmp
                    if depth < maxdepth and two_step_look_ahead:
                        swap(label_positions, marker_positions,
                             depth=depth + 1, best_found=before)

                    after = number_of_intersections(label_positions,
                                                    marker_positions)
                    if best_found > after and before > after:
                        improvement = True
                        print(before, after)
                        print("Depth %d: Swapped %s with %s" %
                              (depth, key1, key2))
                    else:       # ...otherwise swap back
                        tmp = label_positions[key1]
                        label_positions[key1] = label_positions[key2]
                        label_positions[key2] = tmp

                    if after == 0:
                        break

                # Stop if the labels are sorted perfectly, otherwise do
                # another pass, possibly with two-step look-ahead
                if before == 0:
                    print("Sorted perfectly...")
                    break
                print(depth, two_step_look_ahead)
                if two_step_look_ahead:
                    break
                if maxdepth == depth:
                    print("Reached maximum recursion depth...")
                    break
                if not improvement and depth < maxdepth:
                    print("Still %d errors, trying two-step lookahead" % before)
                    two_step_look_ahead = True

        swap(label_positions, marker_positions, maxdepth=maxdepth)
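
    # Untangle the annotation arrows of each border independently; the
    # look-ahead depth of the greedy search is controlled by the `depth`
    # argument (the --depth command line option in the main block below).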
    resort(positions_bottom, points, maxdepth=depth)
    resort(positions_left, points, maxdepth=depth)
    resort(positions_right, points, maxdepth=depth)
    resort(positions_top, points, maxdepth=depth)
    # Helper function which plots one dataset: a marker at its embedded
    # position plus an annotated label connected to it by an arrow
    def plot(x, y, label_x, label_y, label, ha, va, relpos, rotation=0):
        ax.scatter(x, y, marker='o', label=label, s=80, linewidths=0.1,
                   color='blue', edgecolor='black')
        label = label.replace('larochelle_etal_2007_', '')
        ax.annotate(label, xy=(x, y), xytext=(label_x, label_y),
                    ha=ha, va=va, rotation=rotation,
                    bbox=dict(boxstyle='round', fc='gray', alpha=0.5),
                    arrowprops=dict(arrowstyle='->', color='black',
                                    relpos=relpos))
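
    # Each dataset is drawn once: a marker at its embedded position, with its
    # (shortened) name placed at the assigned border position and connected
    # by an arrow; text alignment, rotation and the arrow anchor (relpos)
    # depend on which border the label belongs to.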
    # Do the plotting
    for key in positions_bottom:
        x, y = positions_bottom[key]
        plot(points[key][0], points[key][1], x, y,
             key, ha='right', va='top', rotation=45, relpos=(1, 1))

    for key in positions_left:
        x, y = positions_left[key]
        plot(points[key][0], points[key][1], x, y, key,
             ha='right', va='top', rotation=45, relpos=(1, 1))

    for key in positions_top:
        x, y = positions_top[key]
        plot(points[key][0], points[key][1], x, y, key,
             ha='left', va='bottom', rotation=45, relpos=(0, 0))

    for key in positions_right:
        x, y = positions_right[key]
        plot(points[key][0], points[key][1], x, y, key,
             ha='left', va='bottom', rotation=45, relpos=(0, 0))
    # Resize everything
    box = ax.get_position()
    remove = 0.05 * box.width
    ax.set_position([box.x0 + remove, box.y0 + remove,
                     box.width - remove * 2, box.height - remove * 2])

    locs_x = ax.get_xticks()
    locs_y = ax.get_yticks()
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_xlim((-0.1, 1.1))
    ax.set_ylim((-0.1, 1.1))
    plt.savefig(os.path.join(metafeatures_plot_dir, "pca.png"))
    plt.savefig(os.path.join(metafeatures_plot_dir, "pca.pdf"))
    plt.clf()
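
    # Note: the output files appear to be named pca.png / pca.pdf regardless
    # of the dimensionality reduction method that was chosen.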
    # Relation of the meta-features to each other...
    #correlations = []
    #for mf_1, mf_2 in itertools.combinations(metafeatures.columns, 2):
    #    x = metafeatures.loc[:, mf_1]
    #    y = metafeatures.loc[:, mf_2]
    #    rho, p = scipy.stats.spearmanr(x, y)
    #    correlations.append((rho, "%s-%s" % (mf_1, mf_2)))
    #    plt.figure()
    #    plt.plot(np.arange(0, 1, 0.01), np.arange(0, 1, 0.01))
    #    plt.plot(x, y, "x")
    #    plt.xlabel(mf_1)
    #    plt.ylabel(mf_2)
    #    plt.xlim((0, 1))
    #    plt.ylim((0, 1))
    #    plt.savefig(os.path.join(target_directory,
    #                             mf_1 + "__" + mf_2 + ".png"))
    #    plt.close()
    #correlations.sort()
    #for cor in correlations:
    #    print cor

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tasks", required=True, type=str)
    parser.add_argument("--runs", type=str)
    parser.add_argument("experiment_directory", type=str)
    parser.add_argument("-m", "--method", default='pca',
                        choices=['pca', 't-sne'],
                        help="Dimensionality reduction method")
    parser.add_argument("--distance", choices=[None, 'l1', 'l2', 'runs'],
                        default='l2')
    parser.add_argument("-s", "--seed", default=1, type=int)
    parser.add_argument("-d", "--depth", default=0, type=int)
    parser.add_argument("--subset", default='all',
                        choices=['all', 'pfahringer_2000_experiment1'])
    args = parser.parse_args()

    with open(args.tasks) as fh:
        task_files_list = fh.readlines()

    # Load the experiment run data only if it is needed, i.e. when the
    # run-based distance was requested
    if args.distance == 'runs':
        with open(args.runs) as fh:
            experiments_file_list = fh.readlines()
    else:
        experiments_file_list = StringIO.StringIO()
        for i in range(len(task_files_list)):
            experiments_file_list.write("\n")
        experiments_file_list.seek(0)
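    # When no run data is given, the StringIO above presumably provides one
    # blank line per task so that MetaBase still receives an (empty)
    # experiment entry for every task file; this is an assumption about
    # MetaBase's expected input format.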
    pyMetaLearn.data_repositories.openml.apiconnector.set_local_directory(
        args.experiment_directory)
    meta_base = MetaBase(task_files_list, experiments_file_list)
    metafeatures = meta_base.get_all_metafeatures_as_pandas(
        metafeature_subset=args.subset)
    metafeature_times = meta_base.get_all_metafeatures_times_as_pandas(
        metafeature_subset=args.subset)
    #if args.subset:
    #    metafeatures = metafeatures.loc[:, subsets[args.subset]]
    #    metafeature_times = metafeature_times.loc[:, subsets[args.subset]]
    runs = meta_base.get_all_runs()

    general_plot_directory = os.path.join(args.experiment_directory, "plots")
    try:
        os.mkdir(general_plot_directory)
    except OSError:
        pass
    metafeatures_plot_dir = os.path.join(general_plot_directory,
                                         "metafeatures")
    try:
        os.mkdir(metafeatures_plot_dir)
    except OSError:
        pass

    plot_metafeatures(metafeatures_plot_dir, metafeatures, metafeature_times,
                      runs, method=args.method, seed=args.seed,
                      depth=args.depth, distance=args.distance)
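
    # Example invocation (script name and file paths are hypothetical):
    #   python plot_metafeatures.py /path/to/experiment_directory \
    #       --tasks tasks.txt --runs runs.txt --method t-sne \
    #       --distance runs --seed 1 --depth 1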
| bsd-3-clause |