repo_name | path | copies | size | content | license
---|---|---|---|---|---|
kastnerkyle/pylearn2 | pylearn2/scripts/datasets/make_mnistplus.py | 5 | 8862 | """
Script to generate the MNIST+ dataset. The purpose of this dataset is to make a
more challenging MNIST-like dataset, with multiple factors of variation. These
factors can serve to evaluate a model's performance at learning invariant
features, or its ability to disentangle factors of variation in a multi-task
classification setting. The dataset is stored under $PYLEARN2_DATA_PATH.
The dataset variants are created as follows. For each MNIST image, we:
1. Perform a random rotation of the image (optional)
2. Rescale the image from 28x28 to 48x48, yielding variable `image`.
3.1 Extract a random patch `textured_patch` from a fixed or random image of the
Brodatz texture dataset.
3.2 Generate a mask of the MNIST digit outline by thresholding the digit at 0.1
3.3 Fuse MNIST digit and textured patch as follows:
textured_patch[mask] <= image[mask]; image <= textured_patch;
4. Randomly select position of light source (optional)
5. Perform embossing operation, given fixed lighting position obtained in 4.
"""
import numpy
import pickle
import pylab as pl
from copy import copy
from optparse import OptionParser
from pylearn2.datasets import mnist
from pylearn2.utils import string_utils
import warnings
try:
from PIL import Image
except ImportError:
warnings.warn("Couldn't import Image from PIL, so far make_mnistplus "
"is only supported with PIL")
OUTPUT_SIZE = 48
DOWN_SAMPLE = 1
def to_array(img):
"""
Convert PIL.Image to numpy.ndarray.
    :param img: PIL.Image to convert.
"""
return numpy.array(img.getdata()) / 255.
def to_img(arr, os):
"""
Convert numpy.ndarray to PIL.Image
:param arr: numpy.ndarray
:param os: integer, size of output image.
"""
return Image.fromarray(arr.reshape(os, os) * 255.)
def emboss(img, azi=45., ele=18., dep=2):
"""
Perform embossing of image `img`.
:param img: numpy.ndarray, matrix representing image to emboss.
:param azi: azimuth (in degrees)
:param ele: elevation (in degrees)
:param dep: depth, (0-100)
"""
# defining azimuth, elevation, and depth
ele = (ele * 2 * numpy.pi) / 360.
azi = (azi * 2 * numpy.pi) / 360.
a = numpy.asarray(img).astype('float')
# find the gradient
grad = numpy.gradient(a)
# (it is two arrays: grad_x and grad_y)
grad_x, grad_y = grad
# getting the unit incident ray
gd = numpy.cos(ele) # length of projection of ray on ground plane
dx = gd * numpy.cos(azi)
dy = gd * numpy.sin(azi)
dz = numpy.sin(ele)
# adjusting the gradient by the "depth" factor
# (I think this is how GIMP defines it)
grad_x = grad_x * dep / 100.
grad_y = grad_y * dep / 100.
# finding the unit normal vectors for the image
leng = numpy.sqrt(grad_x**2 + grad_y**2 + 1.)
uni_x = grad_x/leng
uni_y = grad_y/leng
uni_z = 1./leng
# take the dot product
a2 = 255 * (dx*uni_x + dy*uni_y + dz*uni_z)
# avoid overflow
a2 = a2.clip(0, 255)
# you must convert back to uint8 /before/ converting to an image
return Image.fromarray(a2.astype('uint8'))
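# A minimal usage sketch with hypothetical values: emboss a random 48x48
# grayscale "image" using the default light direction used in gendata().
#
#   fake_img = numpy.random.rand(48, 48) * 255.
#   embossed = emboss(fake_img, azi=45., ele=18., dep=2)  # returns a PIL Image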
def extract_patch(textid, os, downsample):
"""
Extract a patch of texture #textid of Brodatz dataset.
:param textid: id of texture image to load.
:param os: size of MNIST+ output images.
:param downsample: integer, downsampling factor.
"""
temp = '${PYLEARN2_DATA_PATH}/textures/brodatz/D%i.gif' % textid
fname = string_utils.preprocess(temp)
img_i = Image.open(fname)
img_i = img_i.resize((img_i.size[0]/downsample,
img_i.size[1]/downsample), Image.BILINEAR)
x = numpy.random.randint(0, img_i.size[0] - os)
y = numpy.random.randint(0, img_i.size[1] - os)
patch = img_i.crop((x, y, x+os, y+os))
return patch, (x, y)
def gendata(enable, os, downsample, textid=None, seed=2313, verbose=False):
"""
Generate the MNIST+ dataset.
:param enable: dictionary of flags with keys ['texture', 'azimuth',
'rotation', 'elevation'] to enable/disable a given factor of variation.
:param textid: if enable['texture'], id number of the Brodatz texture to
load. If textid is None, we load a random texture for each MNIST image.
:param os: output size (width and height) of MNIST+ images.
:param downsample: factor by which to downsample texture.
:param seed: integer for seeding RNG.
:param verbose: bool
"""
rng = numpy.random.RandomState(seed)
data = mnist.MNIST('train')
test = mnist.MNIST('test')
data.X = numpy.vstack((data.X, test.X))
data.y = numpy.hstack((data.y, test.y))
del test
output = {}
output['data'] = numpy.zeros((len(data.X), os*os))
output['label'] = numpy.zeros(len(data.y))
if enable['azimuth']:
output['azimuth'] = numpy.zeros(len(data.y))
if enable['elevation']:
output['elevation'] = numpy.zeros(len(data.y))
if enable['rotation']:
output['rotation'] = numpy.zeros(len(data.y))
if enable['texture']:
output['texture_id'] = numpy.zeros(len(data.y))
output['texture_pos'] = numpy.zeros((len(data.y), 2))
for i in xrange(len(data.X)):
# get MNIST image
frgd_img = to_img(data.X[i], 28)
frgd_img = frgd_img.convert('L')
if enable['rotation']:
rot = rng.randint(0, 360)
output['rotation'][i] = rot
frgd_img = frgd_img.rotate(rot, Image.BILINEAR)
frgd_img = frgd_img.resize((os, os), Image.BILINEAR)
        if enable['texture']:
            if textid is None:
                # extract a patch from a random texture of the Brodatz
                # database. Note that texture #14 does not exist. A local
                # variable is used so that a new random texture is drawn
                # for every MNIST image, as described in the docstring.
                patch_id = 14
                while patch_id == 14:
                    patch_id = rng.randint(1, 113)
            else:
                patch_id = textid
            patch_img, (px, py) = extract_patch(patch_id, os, downsample)
            patch_arr = to_array(patch_img)
            # store output details
            output['texture_id'][i] = patch_id
            output['texture_pos'][i] = (px, py)
# generate binary mask for digit outline
frgd_arr = to_array(frgd_img)
mask_arr = frgd_arr > 0.1
# copy contents of masked-MNIST image into background texture
blend_arr = copy(patch_arr)
blend_arr[mask_arr] = frgd_arr[mask_arr]
            # this now becomes the image to emboss
frgd_img = to_img(blend_arr, os)
azi = 45
if enable['azimuth']:
azi = rng.randint(0, 360)
output['azimuth'][i] = azi
ele = 18.
if enable['elevation']:
ele = rng.randint(0, 60)
output['elevation'][i] = ele
mboss_img = emboss(frgd_img, azi=azi, ele=ele)
mboss_arr = to_array(mboss_img)
output['data'][i] = mboss_arr
output['label'][i] = data.y[i]
if verbose:
pl.imshow(mboss_arr.reshape(os, os))
pl.gray()
pl.show()
fname = 'mnistplus'
if enable['azimuth']:
fname += "_azi"
if enable['rotation']:
fname += "_rot"
if enable['texture']:
fname += "_tex"
    fp = open(fname + '.pkl', 'wb')
pickle.dump(output, fp, protocol=pickle.HIGHEST_PROTOCOL)
fp.close()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-v', action='store_true', dest='verbose')
parser.add_option('--azimuth', action='store_true', dest='azimuth',
help='Enable random azimuth for light-source used in embossing.')
parser.add_option('--elevation', action='store_true', dest='elevation',
help='Enable random elevation for light-source used in embossing.')
parser.add_option('--rotation', action='store_true', dest='rotation',
help='Randomly rotate MNIST digit prior to embossing.')
parser.add_option('--texture', action='store_true', dest='texture',
help='Perform joint embossing of fused {MNIST + Texture} image.')
parser.add_option('--textid', action='store', type='int', dest='textid',
help='If specified, use a single texture ID for all MNIST images.',
default=None)
parser.add_option('--output_size', action='store', type='int', dest='os',
help='Integer specifying size of (square) output images.',
default=OUTPUT_SIZE)
parser.add_option('--downsample', action='store', type='int',
dest='downsample', default=DOWN_SAMPLE,
help='Downsampling factor for Brodatz textures.')
(opts, args) = parser.parse_args()
enable = {'texture': opts.texture,
'azimuth': opts.azimuth,
'rotation': opts.rotation,
'elevation': opts.elevation}
gendata(enable=enable, os=opts.os, downsample=opts.downsample,
verbose=opts.verbose, textid=opts.textid)
| bsd-3-clause |
nicproulx/mne-python | tutorials/plot_brainstorm_auditory.py | 3 | 16597 | # -*- coding: utf-8 -*-
"""
====================================
Brainstorm auditory tutorial dataset
====================================
Here we compute the evoked from raw for the auditory Brainstorm
tutorial dataset. For comparison, see [1]_ and:
http://neuroimage.usc.edu/brainstorm/Tutorials/Auditory
Experiment:
- One subject, 2 acquisition runs 6 minutes each.
- Each run contains 200 regular beeps and 40 easy deviant beeps.
- Random ISI: between 0.7 and 1.7 seconds, uniformly distributed.
- Button pressed when detecting a deviant with the right index finger.
The specifications of this dataset were discussed initially on the
`FieldTrip bug tracker <http://bugzilla.fcdonders.nl/show_bug.cgi?id=2300>`_.
References
----------
.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
Computational Intelligence and Neuroscience, vol. 2011, Article ID
879716, 13 pages, 2011. doi:10.1155/2011/879716
"""
# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#
# License: BSD (3-clause)
import os.path as op
import pandas as pd
import numpy as np
import mne
from mne import combine_evoked
from mne.minimum_norm import apply_inverse
from mne.datasets.brainstorm import bst_auditory
from mne.io import read_raw_ctf
from mne.filter import notch_filter, filter_data
print(__doc__)
###############################################################################
# To reduce memory consumption and running time, some of the steps are
# precomputed. To run everything from scratch change this to False. With
# ``use_precomputed = False`` running time of this script can be several
# minutes even on a fast computer.
use_precomputed = True
###############################################################################
# The data was collected with a CTF 275 system at 2400 Hz and low-pass
# filtered at 600 Hz. Here the data and empty room data files are read to
# construct instances of :class:`mne.io.Raw`.
data_path = bst_auditory.data_path()
subject = 'bst_auditory'
subjects_dir = op.join(data_path, 'subjects')
raw_fname1 = op.join(data_path, 'MEG', 'bst_auditory',
'S01_AEF_20131218_01.ds')
raw_fname2 = op.join(data_path, 'MEG', 'bst_auditory',
'S01_AEF_20131218_02.ds')
erm_fname = op.join(data_path, 'MEG', 'bst_auditory',
'S01_Noise_20131218_01.ds')
###############################################################################
# In the memory saving mode we use ``preload=False`` and use the memory
# efficient IO which loads the data on demand. However, filtering and some
# other functions require the data to be preloaded in the memory.
preload = not use_precomputed
raw = read_raw_ctf(raw_fname1, preload=preload)
n_times_run1 = raw.n_times
mne.io.concatenate_raws([raw, read_raw_ctf(raw_fname2, preload=preload)])
raw_erm = read_raw_ctf(erm_fname, preload=preload)
###############################################################################
# The data channel array consists of 274 MEG axial gradiometers, 26 MEG
# reference sensors and 2 EEG electrodes (Cz and Pz).
# In addition:
#
# - 1 stim channel for marking presentation times for the stimuli
# - 1 audio channel for the sent signal
# - 1 response channel for recording the button presses
# - 1 ECG bipolar
# - 2 EOG bipolar (vertical and horizontal)
# - 12 head tracking channels
# - 20 unused channels
#
# The head tracking channels and the unused channels are marked as misc
# channels. Here we define the EOG and ECG channels.
raw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'})
if not use_precomputed:
# Leave out the two EEG channels for easier computation of forward.
raw.pick_types(meg=True, eeg=False, stim=True, misc=True, eog=True,
ecg=True)
###############################################################################
# For noise reduction, a set of bad segments have been identified and stored
# in csv files. The bad segments are later used to reject epochs that overlap
# with them.
# The file for the second run also contains some saccades. The saccades are
# removed by using SSP. We use pandas to read the data from the csv files. You
# can also view the files with your favorite text editor.
annotations_df = pd.DataFrame()
offset = n_times_run1
for idx in [1, 2]:
csv_fname = op.join(data_path, 'MEG', 'bst_auditory',
'events_bad_0%s.csv' % idx)
df = pd.read_csv(csv_fname, header=None,
names=['onset', 'duration', 'id', 'label'])
print('Events from run {0}:'.format(idx))
print(df)
df['onset'] += offset * (idx - 1)
annotations_df = pd.concat([annotations_df, df], axis=0)
saccades_events = df[df['label'] == 'saccade'].values[:, :3].astype(int)
# Conversion from samples to times:
onsets = annotations_df['onset'].values / raw.info['sfreq']
durations = annotations_df['duration'].values / raw.info['sfreq']
descriptions = annotations_df['label'].values
annotations = mne.Annotations(onsets, durations, descriptions)
raw.annotations = annotations
del onsets, durations, descriptions
###############################################################################
# Here we compute the saccade and EOG projectors for magnetometers and add
# them to the raw data. The projectors are added to both runs.
saccade_epochs = mne.Epochs(raw, saccades_events, 1, 0., 0.5, preload=True,
reject_by_annotation=False)
projs_saccade = mne.compute_proj_epochs(saccade_epochs, n_mag=1, n_eeg=0,
desc_prefix='saccade')
if use_precomputed:
proj_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-eog-proj.fif')
projs_eog = mne.read_proj(proj_fname)[0]
else:
projs_eog, _ = mne.preprocessing.compute_proj_eog(raw.load_data(),
n_mag=1, n_eeg=0)
raw.add_proj(projs_saccade)
raw.add_proj(projs_eog)
del saccade_epochs, saccades_events, projs_eog, projs_saccade # To save memory
###############################################################################
# Visually inspect the effects of projections. Click on 'proj' button at the
# bottom right corner to toggle the projectors on/off. EOG events can be
# plotted by adding the event list as a keyword argument. As the bad segments
# and saccades were added as annotations to the raw data, they are plotted as
# well.
raw.plot(block=True)
###############################################################################
# A typical preprocessing step is the removal of the power line artifact (50 Hz
# or 60 Hz). Here we notch filter the data at 60, 120 and 180 Hz to remove the
# original 60 Hz artifact and the harmonics. The power spectra are plotted
# before and after the filtering to show the effect. The drop after 600 Hz
# appears because the data was filtered during the acquisition. In memory
# saving mode we do the filtering at evoked stage, which is not something you
# usually would do.
if not use_precomputed:
meg_picks = mne.pick_types(raw.info, meg=True, eeg=False)
raw.plot_psd(tmax=np.inf, picks=meg_picks)
notches = np.arange(60, 181, 60)
raw.notch_filter(notches)
raw.plot_psd(tmax=np.inf, picks=meg_picks)
###############################################################################
# We also low-pass filter the data at 100 Hz to remove high-frequency components.
if not use_precomputed:
raw.filter(None, 100., h_trans_bandwidth=0.5, filter_length='10s',
phase='zero-double')
###############################################################################
# Epoching and averaging.
# First some parameters are defined and events extracted from the stimulus
# channel (UPPT001). The rejection thresholds are defined as peak-to-peak
# values and are in T / m for gradiometers, T for magnetometers and
# V for EOG and EEG channels.
tmin, tmax = -0.1, 0.5
event_id = dict(standard=1, deviant=2)
reject = dict(mag=4e-12, eog=250e-6)
# find events
events = mne.find_events(raw, stim_channel='UPPT001')
###############################################################################
# The event timing is adjusted by comparing the trigger times on detected
# sound onsets on channel UADC001-4408.
sound_data = raw[raw.ch_names.index('UADC001-4408')][0][0]
onsets = np.where(np.abs(sound_data) > 2. * np.std(sound_data))[0]
min_diff = int(0.5 * raw.info['sfreq'])
diffs = np.concatenate([[min_diff + 1], np.diff(onsets)])
onsets = onsets[diffs > min_diff]
assert len(onsets) == len(events)
diffs = 1000. * (events[:, 0] - onsets) / raw.info['sfreq']
print('Trigger delay removed (μ ± σ): %0.1f ± %0.1f ms'
% (np.mean(diffs), np.std(diffs)))
events[:, 0] = onsets
del sound_data, diffs
###############################################################################
# We mark a set of bad channels that seem noisier than others. This can also
# be done interactively with ``raw.plot`` by clicking the channel name
# (or the line). The marked channels are added as bad when the browser window
# is closed.
raw.info['bads'] = ['MLO52-4408', 'MRT51-4408', 'MLO42-4408', 'MLO43-4408']
###############################################################################
# The epochs (trials) are created for MEG channels. First we find the picks
# for MEG and EOG channels. Then the epochs are constructed using these picks.
# The epochs overlapping with annotated bad segments are also rejected by
# default. To turn off rejection by bad segments (as was done earlier with
# saccades) you can use keyword ``reject_by_annotation=False``.
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=False,
proj=True)
###############################################################################
# We only use the first 40 good epochs from each run. Since we first drop the
# bad epochs, the indices of the epochs are no longer the same as in the
# original epochs collection. Investigation of the event timings reveals that
# the first epoch from the second run corresponds to index 182.
epochs.drop_bad()
epochs_standard = mne.concatenate_epochs([epochs['standard'][range(40)],
epochs['standard'][182:222]])
epochs_standard.load_data() # Resampling to save memory.
epochs_standard.resample(600, npad='auto')
epochs_deviant = epochs['deviant'].load_data()
epochs_deviant.resample(600, npad='auto')
del epochs, picks
###############################################################################
# The averages for each condition are computed.
evoked_std = epochs_standard.average()
evoked_dev = epochs_deviant.average()
del epochs_standard, epochs_deviant
###############################################################################
# A typical preprocessing step is the removal of the power line artifact (50 Hz
# or 60 Hz). Here we notch filter the data at 60, 120 and 180 Hz to remove the
# original 60 Hz artifact and the harmonics. Normally this would be done to
# raw data (with :func:`mne.io.Raw.filter`), but to reduce memory consumption
# of this tutorial, we do it at evoked stage.
if use_precomputed:
sfreq = evoked_std.info['sfreq']
notches = [60, 120, 180]
for evoked in (evoked_std, evoked_dev):
evoked.data[:] = notch_filter(evoked.data, sfreq, notches)
evoked.data[:] = filter_data(evoked.data, sfreq, l_freq=None,
h_freq=100.)
###############################################################################
# Here we plot the ERF of standard and deviant conditions. In both conditions
# we can see the P50 and N100 responses. The mismatch negativity is visible
# only in the deviant condition around 100-200 ms. P200 is also visible around
# 170 ms in both conditions but much stronger in the standard condition. P300
# is visible in deviant condition only (decision making in preparation of the
# button press). You can view the topographies from a certain time span by
# painting an area with clicking and holding the left mouse button.
evoked_std.plot(window_title='Standard', gfp=True)
evoked_dev.plot(window_title='Deviant', gfp=True)
###############################################################################
# Show activations as topography figures.
times = np.arange(0.05, 0.301, 0.025)
evoked_std.plot_topomap(times=times, title='Standard')
evoked_dev.plot_topomap(times=times, title='Deviant')
###############################################################################
# We can see the MMN effect more clearly by looking at the difference between
# the two conditions. P50 and N100 are no longer visible, but MMN/P200 and
# P300 are emphasised.
evoked_difference = combine_evoked([evoked_dev, -evoked_std], weights='equal')
evoked_difference.plot(window_title='Difference', gfp=True)
###############################################################################
# Source estimation.
# We compute the noise covariance matrix from the empty room measurement
# and use it for the other runs.
reject = dict(mag=4e-12)
cov = mne.compute_raw_covariance(raw_erm, reject=reject)
cov.plot(raw_erm.info)
del raw_erm
###############################################################################
# The transformation is read from a file. For more information about
# coregistering the data, see :ref:`ch_interactive_analysis` or
# :func:`mne.gui.coregistration`.
trans_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-trans.fif')
trans = mne.read_trans(trans_fname)
###############################################################################
# To save time and memory, the forward solution is read from a file. Set
# ``use_precomputed=False`` in the beginning of this script to build the
# forward solution from scratch. The head surfaces for constructing a BEM
# solution are read from a file. Since the data only contains MEG channels, we
# only need the inner skull surface for making the forward solution. For more
# information: :ref:`CHDBBCEJ`, :func:`mne.setup_source_space`,
# :ref:`create_bem_model`, :func:`mne.bem.make_watershed_bem`.
if use_precomputed:
fwd_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-meg-oct-6-fwd.fif')
fwd = mne.read_forward_solution(fwd_fname)
else:
src = mne.setup_source_space(subject, spacing='ico4',
subjects_dir=subjects_dir, overwrite=True)
model = mne.make_bem_model(subject=subject, ico=4, conductivity=[0.3],
subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)
fwd = mne.make_forward_solution(evoked_std.info, trans=trans, src=src,
bem=bem)
inv = mne.minimum_norm.make_inverse_operator(evoked_std.info, fwd, cov)
snr = 3.0
lambda2 = 1.0 / snr ** 2
del fwd
###############################################################################
# The sources are computed using the dSPM method and plotted on an inflated
# brain surface. For interactive controls over the image, use keyword
# ``time_viewer=True``.
# Standard condition.
stc_standard = mne.minimum_norm.apply_inverse(evoked_std, inv, lambda2, 'dSPM')
brain = stc_standard.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.1, time_unit='s')
del stc_standard, brain
###############################################################################
# Deviant condition.
stc_deviant = mne.minimum_norm.apply_inverse(evoked_dev, inv, lambda2, 'dSPM')
brain = stc_deviant.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.1, time_unit='s')
del stc_deviant, brain
###############################################################################
# Difference.
stc_difference = apply_inverse(evoked_difference, inv, lambda2, 'dSPM')
brain = stc_difference.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.15, time_unit='s')
| bsd-3-clause |
alvarouc/polyssifier | polyssifier/poly_utils.py | 1 | 10338 | from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import (LogisticRegression,
LinearRegression,
BayesianRidge,
Ridge, Lasso,
ElasticNet, Lars, LassoLars,
OrthogonalMatchingPursuit,
PassiveAggressiveRegressor)
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier as MLP
from sklearn.gaussian_process import GaussianProcessRegressor
import collections
import numpy as np
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.gaussian_process.kernels import RBF
class MyVoter(object):
"""
Voter Classifier
Receives fitted classifiers and runs majority voting
"""
def __init__(self, estimators):
'''
estimators: List of fitted classifiers
'''
self.estimators_ = estimators
def predict(self, X):
predictions = np.asarray(
[clf.predict(X) for clf in self.estimators_]).T
maj = np.apply_along_axis(
lambda x: np.argmax(np.bincount(x)), axis=1,
arr=predictions.astype('int'))
return maj
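# A minimal usage sketch (hypothetical data): fit a few classifiers yourself,
# then let MyVoter take the majority vote of their predictions.
#
#   X_train = np.random.rand(100, 5)
#   y_train = np.random.randint(0, 2, size=100)
#   fitted = [KNeighborsClassifier().fit(X_train, y_train),
#             GaussianNB().fit(X_train, y_train),
#             DecisionTreeClassifier().fit(X_train, y_train)]
#   y_pred = MyVoter(fitted).predict(np.random.rand(10, 5))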
class MyRegressionAverager(object):
"""
Regression averager
Receives fitted regressors and averages the predictions of the regressors.
"""
def __init__(self, estimators):
'''
estimators: List of fitted regressors
'''
self.estimators_ = estimators
def predict(self, X):
predictions = np.asarray(
[reg.predict(X) for reg in self.estimators_]).T
avg = np.average(predictions, axis=1)
return avg
class MyRegressionMedianer(object):
"""
    Regression medianer
    Receives fitted regressors and returns the median of their predictions.
"""
def __init__(self, estimators):
'''
estimators: List of fitted regressors
'''
self.estimators_ = estimators
def predict(self, X):
predictions = np.asarray(
[reg.predict(X) for reg in self.estimators_]).T
avg = np.median(predictions, axis=1)
return avg
def build_classifiers(exclude, scale, feature_selection, nCols):
'''
Input:
- exclude: list of names of classifiers to exclude from the analysis
- scale: True or False. Scale data before fitting classifier
- feature_selection: True or False. Run feature selection before
fitting classifier
- nCols: Number of columns in input dataset to classifiers
Output:
Dictionary with classifier name as keys.
- 'clf': Classifier object
- 'parameters': Dictionary with parameters of 'clf' as keys
'''
classifiers = collections.OrderedDict()
if 'Multilayer Perceptron' not in exclude:
classifiers['Multilayer Perceptron'] = {
'clf': MLP(),
'parameters': {'hidden_layer_sizes': [(100, 50), (50, 25)],
'max_iter': [500]}
}
if 'Nearest Neighbors' not in exclude:
classifiers['Nearest Neighbors'] = {
'clf': KNeighborsClassifier(),
'parameters': {'n_neighbors': [1, 5, 10, 20]}}
if 'SVM' not in exclude:
classifiers['SVM'] = {
'clf': SVC(C=1, probability=True, cache_size=10000,
class_weight='balanced'),
'parameters': {'kernel': ['rbf', 'poly'],
'C': [0.01, 0.1, 1]}}
if 'Linear SVM' not in exclude:
classifiers['Linear SVM'] = {
'clf': LinearSVC(dual=False, class_weight='balanced'),
'parameters': {'C': [0.01, 0.1, 1],
'penalty': ['l1', 'l2']}}
if 'Decision Tree' not in exclude:
classifiers['Decision Tree'] = {
'clf': DecisionTreeClassifier(max_depth=None,
max_features='auto'),
'parameters': {}}
if 'Random Forest' not in exclude:
classifiers['Random Forest'] = {
'clf': RandomForestClassifier(max_depth=None,
n_estimators=10,
max_features='auto'),
'parameters': {'n_estimators': list(range(5, 20))}}
if 'Logistic Regression' not in exclude:
classifiers['Logistic Regression'] = {
'clf': LogisticRegression(fit_intercept=True, solver='lbfgs',
penalty='l2'),
'parameters': {'C': [0.001, 0.1, 1]}}
if 'Naive Bayes' not in exclude:
classifiers['Naive Bayes'] = {
'clf': GaussianNB(),
'parameters': {}}
# classifiers['Voting'] = {}
def name(x):
"""
        :param x: A classifier entry: a dict whose 'clf' key holds the estimator or pipeline
:return: The class of the final estimator in lower case form
"""
return x['clf']._final_estimator.__class__.__name__.lower()
for key, val in classifiers.items():
if not scale and not feature_selection:
break
steps = []
if scale:
steps.append(StandardScaler())
if feature_selection:
steps.append(SelectKBest(f_regression, k='all'))
steps.append(classifiers[key]['clf'])
classifiers[key]['clf'] = make_pipeline(*steps)
        # Reorganize parameter list for grid search
new_dict = {}
for keyp in classifiers[key]['parameters']:
new_dict[name(classifiers[key]) + '__' +
keyp] = classifiers[key]['parameters'][keyp]
classifiers[key]['parameters'] = new_dict
if nCols > 5 and feature_selection:
classifiers[key]['parameters']['selectkbest__k'] = np.linspace(
np.round(nCols / 5), nCols, 5).astype('int').tolist()
return classifiers
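# A usage sketch (hypothetical arguments): build the classifier table for a
# 10-column dataset with scaling and feature selection enabled, then inspect
# the grid-search parameter names prefixed with the pipeline step.
#
#   clfs = build_classifiers(exclude=[], scale=True,
#                            feature_selection=True, nCols=10)
#   clfs['SVM']['clf']         # a make_pipeline(...) object
#   clfs['SVM']['parameters']  # keys like 'svc__C', 'svc__kernel', 'selectkbest__k'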
def build_regressors(exclude, scale, feature_selection, nCols):
'''
    This method builds an ordered dictionary of regressors, where each key is the name of a
    regressor and each value is itself a dictionary with two keys. The first key, 'reg',
    points to the regression object created by scikit-learn. The second key, 'parameters',
    points to another dictionary containing the parameters associated with that particular
    regression model. These parameters are used by grid search in polyssifier.py when finding
    the best model. If no parameters are defined, grid search is not performed on that
    regression model, and the model's default parameters are used instead.
'''
regressors = collections.OrderedDict()
if 'Linear Regression' not in exclude:
regressors['Linear Regression'] = {
'reg': LinearRegression(),
'parameters': {} # Best to leave default parameters
}
if 'Bayesian Ridge' not in exclude:
regressors['Bayesian Ridge'] = {
'reg': BayesianRidge(),
'parameters': {} # Investigate if alpha and lambda parameters should be changed
}
if 'PassiveAggressiveRegressor' not in exclude:
regressors['PassiveAggressiveRegressor'] = {
'reg': PassiveAggressiveRegressor(),
'parameters': {'C': [0.5, 1.0, 1.5]
}
}
if 'GaussianProcessRegressor' not in exclude:
regressors['GaussianProcessRegressor'] = {
'reg': GaussianProcessRegressor(),
'parameters': {
'alpha': [0.01, 0.1, 1.0, 10.0],
'kernel': [RBF(x) for x in [0.01, 1.0, 100.0, 1000.0]],
}
}
if 'Ridge' not in exclude:
regressors['Ridge'] = {
'reg': Ridge(),
'parameters': {
'alpha': [0.25, 0.50, 0.75, 1.00]
}
}
if 'Lasso' not in exclude:
regressors['Lasso'] = {
'reg': Lasso(),
'parameters': {
'alpha': [0.25, 0.50, 0.75, 1.00]
}
}
if 'Lars' not in exclude:
regressors['Lars'] = {
'reg': Lars(),
'parameters': {} # Best to leave the default parameters
}
if 'LassoLars' not in exclude:
regressors['LassoLars'] = {
'reg': LassoLars(),
'parameters': {'alpha': [0.25, 0.50, 0.75, 1.00, 10.0]}
}
if 'OrthogonalMatchingPursuit' not in exclude:
regressors['OrthogonalMatchingPursuit'] = {
'reg': OrthogonalMatchingPursuit(),
'parameters': {} # Best to leave default parameters
}
if 'ElasticNet' not in exclude:
regressors['ElasticNet'] = {
'reg': ElasticNet(),
'parameters': {'alpha': [0.25, 0.50, 0.75, 1.00],
'l1_ratio': [0.25, 0.50, 0.75, 1.00]}
}
def name(x):
"""
        :param x: A regressor entry: a dict whose 'reg' key holds the estimator or pipeline
:return: The class of the final regression estimator in lower case form
"""
return x['reg']._final_estimator.__class__.__name__.lower()
for key, val in regressors.items():
if not scale and not feature_selection:
break
steps = []
if scale:
steps.append(StandardScaler())
if feature_selection:
steps.append(SelectKBest(f_regression, k='all'))
steps.append(regressors[key]['reg'])
regressors[key]['reg'] = make_pipeline(*steps)
        # Reorganize parameter list for grid search
new_dict = {}
for keyp in regressors[key]['parameters']:
new_dict[name(regressors[key]) + '__' +
keyp] = regressors[key]['parameters'][keyp]
regressors[key]['parameters'] = new_dict
if nCols > 5 and feature_selection:
regressors[key]['parameters']['selectkbest__k'] = np.linspace(
np.round(nCols / 5), nCols, 5).astype('int').tolist()
return regressors
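# A usage sketch (hypothetical arguments), mirroring build_classifiers above:
#
#   regs = build_regressors(exclude=['Lars'], scale=True,
#                           feature_selection=False, nCols=4)
#   regs['Ridge']['reg']         # make_pipeline(StandardScaler(), Ridge())
#   regs['Ridge']['parameters']  # {'ridge__alpha': [0.25, 0.50, 0.75, 1.00]}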
| gpl-2.0 |
UMWRG/HydraPlatform | HydraServer/python/HydraServer/plugins/timeseries_functions.py | 2 | 5406 | # (c) Copyright 2013, 2014, University of Manchester
#
# HydraPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HydraPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HydraPlatform. If not, see <http://www.gnu.org/licenses/>
#
from spyne.decorator import rpc
from spyne.model.primitive import Integer, Unicode, AnyDict
from HydraServer.soap_server.hydra_base import HydraService
from HydraServer.lib.data import get_dataset
from HydraLib.HydraException import HydraError
from HydraServer.util import get_val
import logging
import numpy
import json
log = logging.getLogger(__name__)
op_map = {
'add' : lambda x, y: numpy.add(x, y),
'subtract' : lambda x, y: numpy.subtract(x, y),
'multiply' : lambda x, y: numpy.multiply(x, y),
'divide' : lambda x, y: numpy.divide(x, y),
'avg' : lambda x : numpy.mean(x),
'stddev' : lambda x : numpy.std(x),
}
class Service(HydraService):
__service_name__ = "TimeseriesService"
@rpc(Integer(min_occurs=2, max_occurs='unbounded'), _returns=Unicode)
def subtract_datasets(ctx, dataset_ids):
"""
Subtract the value of dataset[1] from the value of dataset[0].
        then subtract dataset[2] from the result, and so on.
Rules: 1: The datasets must be of the same type
2: The datasets must be numerical
3: If timeseries, the timesteps must match.
The result is a new value, NOT a new dataset. It is up
to the client to create a new datasets with the resulting value
if they wish to do so.
"""
return _perform_op_on_datasets('subtract', dataset_ids, **ctx.in_header.__dict__)
@rpc(Integer(min_occurs=2, max_occurs='unbounded'), _returns=Unicode)
def add_datasets(ctx, dataset_ids):
"""
Add the value of dataset[0] to the value of dataset[1] etc.
Rules: 1: The datasets must be of the same type
2: The datasets must be numerical
3: If timeseries, the timesteps must match.
The result is a new value, NOT a new dataset. It is up
to the client to create a new datasets with the resulting value
if they wish to do so.
"""
return _perform_op_on_datasets('add', dataset_ids, **ctx.in_header.__dict__)
@rpc(Integer(min_occurs=2, max_occurs='unbounded'), _returns=Unicode)
def multiply_datasets(ctx, dataset_ids):
"""
Multiply the value of dataset[0] by the value of dataset[1] and the result
by the value of dataset[2] etc.
Rules: 1: The datasets must be of the same type
2: The datasets must be numerical
3: If timeseries, the timesteps must match.
The result is a new value, NOT a new dataset. It is up
to the client to create a new datasets with the resulting value
if they wish to do so.
"""
return _perform_op_on_datasets('multiply', dataset_ids, **ctx.in_header.__dict__)
@rpc(Integer(min_occurs=2, max_occurs='unbounded'), _returns=Unicode)
def divide_datasets(ctx, dataset_ids):
"""
Divide the value of dataset[0] by the value of dataset[1], the
result of which is divided by the value of dataset[2] etc.
Rules: 1: The datasets must be of the same type
2: The datasets must be numerical
3: If timeseries, the timesteps must match.
The result is a new value, NOT a new dataset. It is up
to the client to create a new datasets with the resulting value
if they wish to do so.
"""
return _perform_op_on_datasets('divide', dataset_ids, **ctx.in_header.__dict__)
def _perform_op_on_datasets(op, dataset_ids, **kwargs):
datasets = []
for dataset_id in dataset_ids:
datasets.append(get_dataset(dataset_id, **kwargs))
data_type = None
vals = []
for d in datasets:
if data_type is None:
data_type = d.data_type
if data_type == 'descriptor':
raise HydraError("Data must be numerical")
else:
            if d.data_type != data_type:
raise HydraError("Data types do not match.")
dataset_val = get_val(d)
if data_type == 'timeseries':
dataset_val = dataset_val.astype('float')
vals.append(dataset_val)
_op = op_map[op]
op_result = vals[0]
for v in vals[1:]:
try:
op_result = _op(op_result, v)
except:
raise HydraError("Unable to perform operation %s on values %s and %s"
%(op, op_result, v))
if data_type == 'timeseries':
return op_result.to_json(date_format='iso', date_unit='ns')
elif data_type == 'array':
return json.dumps(list(op_result))
else:
return json.dumps(str(op_result))
| gpl-3.0 |
kastnerkyle/pylearn2 | pylearn2/scripts/datasets/step_through_small_norb.py | 49 | 3123 | #! /usr/bin/env python
"""
A script for sequentially stepping through SmallNORB, viewing each image and
its label.
Intended as a demonstration of how to iterate through NORB images,
and as a way of testing SmallNORB's StereoViewConverter.
If you just want an image viewer, consider
pylearn2/scripts/show_binocular_grayscale_images.py,
which is not specific to SmallNORB.
"""
__author__ = "Matthew Koichi Grimes"
__copyright__ = "Copyright 2010-2014, Universite de Montreal"
__credits__ = __author__
__license__ = "3-clause BSD"
__maintainer__ = __author__
__email__ = "mkg alum mit edu (@..)"
import argparse, pickle, sys
from matplotlib import pyplot
from pylearn2.datasets.norb import SmallNORB
from pylearn2.utils import safe_zip
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Step-through visualizer for SmallNORB dataset")
parser.add_argument("--which_set",
default='train',
required=True,
help=("'train', 'test', or the path to a "
"SmallNORB .pkl file"))
return parser.parse_args()
def load_norb(args):
if args.which_set in ('test', 'train'):
return SmallNORB(args.which_set, True)
else:
norb_file = open(args.which_set)
return pickle.load(norb_file)
args = parse_args()
norb = load_norb(args)
topo_space = norb.view_converter.topo_space # does not include label space
vec_space = norb.get_data_specs()[0].components[0]
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.suptitle("Press space to step through, or 'q' to quit.")
def draw_and_increment(iterator):
"""
Draws the image pair currently pointed at by the iterator,
then increments the iterator.
"""
def draw(batch_pair):
for axis, image_batch in safe_zip(axes, batch_pair):
assert image_batch.shape[0] == 1
grayscale_image = image_batch[0, :, :, 0]
axis.imshow(grayscale_image, cmap='gray')
figure.canvas.draw()
def get_values_and_increment(iterator):
try:
vec_stereo_pair, labels = norb_iter.next()
except StopIteration:
return (None, None)
topo_stereo_pair = vec_space.np_format_as(vec_stereo_pair,
topo_space)
return topo_stereo_pair, labels
batch_pair, labels = get_values_and_increment(norb_iter)
draw(batch_pair)
norb_iter = norb.iterator(mode='sequential',
batch_size=1,
data_specs=norb.get_data_specs())
def on_key_press(event):
if event.key == ' ':
draw_and_increment(norb_iter)
if event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
draw_and_increment(norb_iter)
pyplot.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
OpenMined/PySyft | benchmarks/macro_executor.py | 1 | 3935 | # stdlib
from datetime import date
import json
import os
from pathlib import Path
import subprocess
from time import time
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
# third party
import pyarrow.parquet as pq
# syft absolute
import syft as sy
from syft.core.adp.data_subject_list import DataSubjectList
from syft.core.node.common.node_service.user_manager.user_messages import (
UpdateUserMessage,
)
from syft.util import download_file
from syft.util import get_root_data_path
benchmark_report: dict = {}
today = date.today()
date = today.strftime("%B %d, %Y")
benchmark_report["date"] = date
def get_git_revision_short_hash() -> str:
return (
subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
.decode("ascii")
.strip()
)
benchmark_report["git_revision_hash"] = get_git_revision_short_hash()
def download_spicy_bird_benchmark(
sizes: Optional[List[str]] = None,
) -> Tuple[Dict[str, Path], List[str]]:
sizes = sizes if sizes else ["100K", "250K", "500K", "750K", "1M", "1B"]
file_suffix = "_rows_dataset_sample.parquet"
BASE_URL = "https://raw.githubusercontent.com/madhavajay/datasets/main/spicy_bird/"
folder_name = "spicy_bird"
dataset_path = get_root_data_path() / folder_name
paths = []
for size in sizes:
filename = f"{size}{file_suffix}"
full_path = dataset_path / filename
url = f"{BASE_URL}{filename}"
if not os.path.exists(full_path):
print(url)
path = download_file(url=url, full_path=full_path)
else:
path = Path(full_path)
paths.append(path)
return dict(zip(sizes, paths)), sizes
key_size = "1B"
files, ordered_sizes = download_spicy_bird_benchmark(sizes=[key_size])
data_file = files[key_size]
benchmark_report["data_row_size"] = key_size
t0 = time()
df = pq.read_table(data_file)
end_time = time()
tf = round(time() - t0, 4)
print(f"Time taken to read parquet file: {round(tf, 2)} seconds")
benchmark_report["read_parquet"] = tf
t0 = time()
impressions = df["impressions"].to_numpy()
data_subjects = DataSubjectList.from_series(df["user_id"])
tf = round(time() - t0, 4)
benchmark_report["data_subject_list_creation"] = tf
print(f"Time taken to create inputs for Syft Tensor: {round(tf,2)} seconds")
t0 = time()
tweets_data = sy.Tensor(impressions).annotate_with_dp_metadata(
lower_bound=70, upper_bound=2000, data_subjects=data_subjects
)
tf = round(time() - t0, 4)
print(f"Time taken to make Private Syft Tensor: {round(tf,2)} seconds")
benchmark_report["make_private_syft_tensor"] = tf
# login to domain
domain_node = sy.login(email="info@openmined.org", password="changethis", port=9082)
# Upgrade admins budget
content = {"user_id": 1, "budget": 9_999_999}
domain_node._perform_grid_request(grid_msg=UpdateUserMessage, content=content)
dataset_name = "1B Tweets dataset"
t0 = time()
domain_node.load_dataset(
assets={"1B Tweets dataset": tweets_data},
name=dataset_name,
description=" Tweets- 1B rows",
)
tf = round(time() - t0, 3)
print(f"Time taken to load {dataset_name} dataset: {tf} seconds")
benchmark_report["load_dataset"] = tf
data = domain_node.datasets[-1]["1B Tweets dataset"]
print(data)
sum_result = data.sum()
try:
t0 = time()
sum_result.block
tf = round(time() - t0, 3)
except Exception as e:
print(e)
print(f"Time taken to get sum: {tf} seconds")
benchmark_report["get_sum"] = tf
# Sum result publish
published_result = sum_result.publish(sigma=1e6)
t0 = time()
published_result.block
tf = round(time() - t0, 3)
print(f"Time taken to publish: {tf} seconds")
benchmark_report["publish"] = tf
print(benchmark_report)
benchmark_report_json = json.dumps(benchmark_report, indent=4)
print(benchmark_report_json)
with open("macro_benchmark.json", "w") as outfile:
outfile.write(benchmark_report_json)
| apache-2.0 |
shangwuhencc/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 265 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
lakshayg/tensorflow | tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py | 53 | 4253 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataProvider that provides data from a Dataset.
DatasetDataProviders provide data from datasets. The provide can be configured
to use multiple readers simultaneously or read via a single reader.
Additionally, the data being read can be optionally shuffled.
For example, to read data using a single thread without shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.get_split('train'),
shuffle=False)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
To read data using multiple readers simultaneous with shuffling:
pascal_voc_data_provider = DatasetDataProvider(
slim.datasets.pascal_voc.Dataset(),
num_readers=10,
shuffle=True)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
Equivalently, one may request different fields of the same sample separately:
[images] = pascal_voc_data_provider.get(['images'])
[labels] = pascal_voc_data_provider.get(['labels'])
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.slim.python.slim.data import data_provider
from tensorflow.contrib.slim.python.slim.data import parallel_reader
class DatasetDataProvider(data_provider.DataProvider):
def __init__(self,
dataset,
num_readers=1,
reader_kwargs=None,
shuffle=True,
num_epochs=None,
common_queue_capacity=256,
common_queue_min=128,
record_key='record_key',
seed=None,
scope=None):
"""Creates a DatasetDataProvider.
Note: if `num_epochs` is not `None`, local counter `epochs` will be created
by relevant function. Use `local_variables_initializer()` to initialize
local variables.
Args:
dataset: An instance of the Dataset class.
num_readers: The number of parallel readers to use.
reader_kwargs: An optional dict of kwargs for the reader.
shuffle: Whether to shuffle the data sources and common queue when
reading.
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
common_queue_capacity: The capacity of the common queue.
common_queue_min: The minimum number of elements in the common queue after
a dequeue.
record_key: The item name to use for the dataset record keys in the
provided tensors.
seed: The seed to use if shuffling.
scope: Optional name scope for the ops.
Raises:
ValueError: If `record_key` matches one of the items in the dataset.
"""
key, data = parallel_reader.parallel_read(
dataset.data_sources,
reader_class=dataset.reader,
num_epochs=num_epochs,
num_readers=num_readers,
reader_kwargs=reader_kwargs,
shuffle=shuffle,
capacity=common_queue_capacity,
min_after_dequeue=common_queue_min,
seed=seed,
scope=scope)
items = dataset.decoder.list_items()
tensors = dataset.decoder.decode(data, items)
items_to_tensors = dict(zip(items, tensors))
if record_key in items_to_tensors:
raise ValueError('The item name used for `record_key` cannot also be '
'used for a dataset item: %s', record_key)
items_to_tensors[record_key] = key
super(DatasetDataProvider, self).__init__(
items_to_tensors=items_to_tensors,
num_samples=dataset.num_samples)
| apache-2.0 |
wavycloud/pyboto3 | pyboto3/glue.py | 1 | 692979 | '''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def batch_create_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionInputList=None):
"""
Creates one or more partitions in a batch operation.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_create_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionInputList=[
{
'Values': [
'string',
],
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
]
)
:type CatalogId: string
:param CatalogId: The ID of the catalog in which the partition is to be created. Currently, this should be the AWS account ID.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the metadata database in which the partition is to be created.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the metadata table in which the partition is to be created.\n
:type PartitionInputList: list
:param PartitionInputList: [REQUIRED]\nA list of PartitionInput structures that define the partitions to be created.\n\n(dict) --The structure used to create and update a partition.\n\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\n\n(string) --\n\n\nLastAccessTime (datetime) --The last time at which the partition was accessed.\n\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. 
An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nParameters (dict) --These key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
The errors encountered when trying to create the requested partitions.
(dict) --
Contains information about a partition error.
PartitionValues (list) --
The values that define the partition.
(string) --
ErrorDetail (dict) --
The details about the partition error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
(string) --
"""
pass
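# Minimal usage sketch: how batch_create_partition might be called with a real boto3
# Glue client. The database, table, S3 path, and Hadoop format/SerDe class names below
# are illustrative placeholders, not values taken from this file.
def _example_batch_create_partition():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    response = glue.batch_create_partition(
        DatabaseName='example_db',      # hypothetical database name
        TableName='example_table',      # hypothetical table name
        PartitionInputList=[{
            'Values': ['2021', '01'],   # must follow the table's partition-key order
            'StorageDescriptor': {
                'Location': 's3://example-bucket/example_table/year=2021/month=01/',
                'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
                'OutputFormat': 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
                'SerdeInfo': {'SerializationLibrary': 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'},
            },
        }],
    )
    # Partial failures are reported per partition in 'Errors' rather than raised.
    for err in response.get('Errors', []):
        print(err['PartitionValues'], err['ErrorDetail']['ErrorCode'])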
def batch_delete_connection(CatalogId=None, ConnectionNameList=None):
"""
Deletes a list of connection definitions from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_connection(
CatalogId='string',
ConnectionNameList=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connections reside. If none is provided, the AWS account ID is used by default.
:type ConnectionNameList: list
:param ConnectionNameList: [REQUIRED]\nA list of names of the connections to delete.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{
'Succeeded': [
'string',
],
'Errors': {
'string': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
}
}
Response Structure
(dict) --
Succeeded (list) --
A list of names of the connection definitions that were successfully deleted.
(string) --
Errors (dict) --
A map of the names of connections that were not successfully deleted to error details.
(string) --
(dict) --
Contains details about an error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Succeeded': [
'string',
],
'Errors': {
'string': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
}
}
:returns:
(string) --
"""
pass
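# Minimal usage sketch for batch_delete_connection with a real boto3 Glue client.
# The connection names are hypothetical placeholders.
def _example_batch_delete_connection():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    response = glue.batch_delete_connection(
        ConnectionNameList=['jdbc-conn-a', 'jdbc-conn-b']  # hypothetical connection names
    )
    print('Deleted:', response.get('Succeeded', []))
    # Failures come back as a name -> ErrorDetail map instead of an exception.
    for name, detail in response.get('Errors', {}).items():
        print(name, detail['ErrorCode'], detail['ErrorMessage'])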
def batch_delete_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionsToDelete=None):
"""
Deletes one or more partitions in a batch operation.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionsToDelete=[
{
'Values': [
'string',
]
},
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition to be deleted resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table in question resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table that contains the partitions to be deleted.\n
:type PartitionsToDelete: list
:param PartitionsToDelete: [REQUIRED]\nA list of PartitionInput structures that define the partitions to be deleted.\n\n(dict) --Contains a list of values defining partitions.\n\nValues (list) -- [REQUIRED]The list of values.\n\n(string) --\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
The errors encountered when trying to delete the requested partitions.
(dict) --
Contains information about a partition error.
PartitionValues (list) --
The values that define the partition.
(string) --
ErrorDetail (dict) --
The details about the partition error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Errors': [
{
'PartitionValues': [
'string',
],
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
(string) --
"""
pass
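# Minimal usage sketch for batch_delete_partition; the database, table, and partition
# values are hypothetical placeholders.
def _example_batch_delete_partition():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    response = glue.batch_delete_partition(
        DatabaseName='example_db',
        TableName='example_table',
        PartitionsToDelete=[
            {'Values': ['2021', '01']},  # one entry per partition, in partition-key order
            {'Values': ['2021', '02']},
        ],
    )
    for err in response.get('Errors', []):
        print(err['PartitionValues'], err['ErrorDetail']['ErrorMessage'])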
def batch_delete_table(CatalogId=None, DatabaseName=None, TablesToDelete=None):
"""
Deletes multiple tables at once.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_table(
CatalogId='string',
DatabaseName='string',
TablesToDelete=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the tables to delete reside. For Hive compatibility, this name is entirely lowercase.\n
:type TablesToDelete: list
:param TablesToDelete: [REQUIRED]\nA list of the tables to delete.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{
'Errors': [
{
'TableName': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
A list of errors encountered in attempting to delete the specified tables.
(dict) --
An error record for table operations.
TableName (string) --
The name of the table. For Hive compatibility, this must be entirely lowercase.
ErrorDetail (dict) --
The details about the error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Errors': [
{
'TableName': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
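# Minimal usage sketch for batch_delete_table; the names are hypothetical and, for
# Hive compatibility, kept entirely lowercase.
def _example_batch_delete_table():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    response = glue.batch_delete_table(
        DatabaseName='example_db',
        TablesToDelete=['staging_events', 'staging_users'],  # hypothetical table names
    )
    for err in response.get('Errors', []):
        print(err['TableName'], err['ErrorDetail']['ErrorCode'])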
def batch_delete_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionIds=None):
"""
Deletes a specified batch of versions of a table.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_delete_table_version(
CatalogId='string',
DatabaseName='string',
TableName='string',
VersionIds=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type VersionIds: list
:param VersionIds: [REQUIRED]\nA list of the IDs of versions to be deleted. A VersionId is a string representation of an integer. Each version is incremented by 1.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{
'Errors': [
{
'TableName': 'string',
'VersionId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
Errors (list) --
A list of errors encountered while trying to delete the specified table versions.
(dict) --
An error record for table-version operations.
TableName (string) --
The name of the table in question.
VersionId (string) --
The ID value of the version in question. A VersionID is a string representation of an integer. Each version is incremented by 1.
ErrorDetail (dict) --
The details about the error.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Errors': [
{
'TableName': 'string',
'VersionId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
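# Minimal usage sketch for batch_delete_table_version; VersionIds are string
# representations of integers, and all names here are hypothetical.
def _example_batch_delete_table_version():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    response = glue.batch_delete_table_version(
        DatabaseName='example_db',
        TableName='example_table',
        VersionIds=['1', '2', '3'],
    )
    for err in response.get('Errors', []):
        print(err['TableName'], err['VersionId'], err['ErrorDetail']['ErrorCode'])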
def batch_get_crawlers(CrawlerNames=None):
"""
Returns a list of resource metadata for a given list of crawler names. After calling the ListCrawlers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_crawlers(
CrawlerNames=[
'string',
]
)
:type CrawlerNames: list
:param CrawlerNames: [REQUIRED]\nA list of crawler names, which might be the names returned from the ListCrawlers operation.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'CrawlersNotFound': [
'string',
]
}
Response Structure
(dict) --
Crawlers (list) --A list of crawler definitions.
(dict) --Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.
Name (string) --The name of the crawler.
Role (string) --The Amazon Resource Name (ARN) of an IAM role that\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.
Targets (dict) --A collection of targets to crawl.
S3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.
(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).
Path (string) --The path to the Amazon S3 target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
JdbcTargets (list) --Specifies JDBC targets.
(dict) --Specifies a JDBC data store to crawl.
ConnectionName (string) --The name of the connection to use to connect to the JDBC target.
Path (string) --The path of the JDBC target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
DynamoDBTargets (list) --Specifies Amazon DynamoDB targets.
(dict) --Specifies an Amazon DynamoDB table to crawl.
Path (string) --The name of the DynamoDB table to crawl.
CatalogTargets (list) --Specifies AWS Glue Data Catalog targets.
(dict) --Specifies an AWS Glue Data Catalog target.
DatabaseName (string) --The name of the database to be synchronized.
Tables (list) --A list of the tables to be synchronized.
(string) --
DatabaseName (string) --The name of the database in which the crawler\'s output is stored.
Description (string) --A description of the crawler.
Classifiers (list) --A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.
(string) --
SchemaChangePolicy (dict) --The policy that specifies update and delete behaviors for the crawler.
UpdateBehavior (string) --The update behavior when the crawler finds a changed schema.
DeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.
State (string) --Indicates whether the crawler is running, or whether a run is pending.
TablePrefix (string) --The prefix added to the names of tables that are created.
Schedule (dict) --For scheduled crawlers, the schedule when the crawler runs.
ScheduleExpression (string) --A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
State (string) --The state of the schedule.
CrawlElapsedTime (integer) --If the crawler is running, contains the total time elapsed since the last crawl began.
CreationTime (datetime) --The time that the crawler was created.
LastUpdated (datetime) --The time that the crawler was last updated.
LastCrawl (dict) --The status of the last crawl, and potentially error information if an error occurred.
Status (string) --Status of the last crawl.
ErrorMessage (string) --If an error occurred, the error information about the last crawl.
LogGroup (string) --The log group for the last crawl.
LogStream (string) --The log stream for the last crawl.
MessagePrefix (string) --The prefix for a message about this crawl.
StartTime (datetime) --The time at which the crawl started.
Version (integer) --The version of the crawler.
Configuration (string) --Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
CrawlerSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used by this crawler.
CrawlersNotFound (list) --A list of names of crawlers that were not found.
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'CrawlersNotFound': [
'string',
]
}
:returns:
(string) --
"""
pass
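# Minimal usage sketch that pairs ListCrawlers with BatchGetCrawlers, following the
# flow described in the docstring above; purely illustrative.
def _example_batch_get_crawlers():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    names = glue.list_crawlers().get('CrawlerNames', [])
    if not names:
        return
    response = glue.batch_get_crawlers(CrawlerNames=names)
    for crawler in response.get('Crawlers', []):
        print(crawler['Name'], crawler.get('State'))
    print('Not found:', response.get('CrawlersNotFound', []))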
def batch_get_dev_endpoints(DevEndpointNames=None):
"""
Returns a list of resource metadata for a given list of development endpoint names. After calling the ListDevEndpoints operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_dev_endpoints(
DevEndpointNames=[
'string',
]
)
:type DevEndpointNames: list
:param DevEndpointNames: [REQUIRED]\nThe list of DevEndpoint names, which might be the names returned from the ListDevEndpoints operation.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'DevEndpointsNotFound': [
'string',
]
}
Response Structure
(dict) --
DevEndpoints (list) --A list of DevEndpoint definitions.
(dict) --A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.
EndpointName (string) --The name of the DevEndpoint .
RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .
SecurityGroupIds (list) --A list of security group identifiers used in this DevEndpoint .
(string) --
SubnetId (string) --The subnet ID for this DevEndpoint .
YarnEndpointAddress (string) --The YARN endpoint address used by this DevEndpoint .
PrivateAddress (string) --A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.
ZeppelinRemoteSparkInterpreterPort (integer) --The Apache Zeppelin port for the remote Apache Spark interpreter.
PublicAddress (string) --The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .
Status (string) --The current status of this DevEndpoint .
WorkerType (string) --The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Development endpoints that are created without specifying a Glue version default to Glue 0.9.
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
NumberOfNodes (integer) --The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .
AvailabilityZone (string) --The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.
Note
You can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.
ExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
Note
You can only use pure Java/Scala libraries with a DevEndpoint .
FailureReason (string) --The reason for a current failure in this DevEndpoint .
LastUpdateStatus (string) --The status of the last update.
CreatedTimestamp (datetime) --The point in time at which this DevEndpoint was created.
LastModifiedTimestamp (datetime) --The point in time at which this DevEndpoint was last modified.
PublicKey (string) --The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
PublicKeys (list) --A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.
Note
If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.
(string) --
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this DevEndpoint .
Arguments (dict) --A map of arguments used to configure the DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
DevEndpointsNotFound (list) --A list of DevEndpoints not found.
(string) --
Exceptions
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'DevEndpointsNotFound': [
'string',
]
}
:returns:
(string) --
"""
pass
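# Minimal usage sketch for batch_get_dev_endpoints, following the ListDevEndpoints ->
# BatchGetDevEndpoints flow described above; it prints whatever endpoints the account has.
def _example_batch_get_dev_endpoints():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    names = glue.list_dev_endpoints().get('DevEndpointNames', [])
    if not names:
        return
    response = glue.batch_get_dev_endpoints(DevEndpointNames=names)
    for endpoint in response.get('DevEndpoints', []):
        print(endpoint['EndpointName'], endpoint.get('Status'), endpoint.get('GlueVersion'))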
def batch_get_jobs(JobNames=None):
"""
Returns a list of resource metadata for a given list of job names. After calling the ListJobs operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_jobs(
JobNames=[
'string',
]
)
:type JobNames: list
:param JobNames: [REQUIRED]\nA list of job names, which might be the names returned from the ListJobs operation.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'JobsNotFound': [
'string',
]
}
Response Structure
(dict) --
Jobs (list) --A list of job definitions.
(dict) --Specifies a job definition.
Name (string) --The name you assign to this job definition.
Description (string) --A description of the job.
LogUri (string) --This field is reserved for future use.
Role (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
CreatedOn (datetime) --The time and date that this job definition was created.
LastModifiedOn (datetime) --The last point in time when this job definition was modified.
ExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
MaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.
Command (dict) --The JobCommand that executes this job.
Name (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .
ScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.
PythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.
DefaultArguments (dict) --The default arguments for this job, specified as name-value pairs.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
NonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.
(string) --
(string) --
Connections (dict) --The connections used for this job.
Connections (list) --A list of connections used by the job.
(string) --
MaxRetries (integer) --The maximum number of times to retry this job after a JobRun fails.
AllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Timeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.
NotificationProperty (dict) --Specifies configuration properties of a job notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
JobsNotFound (list) --A list of names of jobs not found.
(string) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'JobsNotFound': [
'string',
]
}
:returns:
(string) --
(string) --
"""
pass
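# Minimal usage sketch for batch_get_jobs; it inspects the capacity-related fields of
# whatever jobs the account returns.
def _example_batch_get_jobs():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    names = glue.list_jobs().get('JobNames', [])
    if not names:
        return
    response = glue.batch_get_jobs(JobNames=names)
    for job in response.get('Jobs', []):
        # WorkerType/NumberOfWorkers and MaxCapacity are alternative ways of sizing a
        # job, so one of them may be absent in a given definition.
        print(job['Name'], job.get('WorkerType'), job.get('NumberOfWorkers'), job.get('MaxCapacity'))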
def batch_get_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionsToGet=None):
"""
Retrieves partitions in a batch request.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionsToGet=[
{
'Values': [
'string',
]
},
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the partitions reside.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the partitions\' table.\n
:type PartitionsToGet: list
:param PartitionsToGet: [REQUIRED]\nA list of partition values identifying the partitions to retrieve.\n\n(dict) --Contains a list of values defining partitions.\n\nValues (list) -- [REQUIRED]The list of values.\n\n(string) --\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'UnprocessedKeys': [
{
'Values': [
'string',
]
},
]
}
Response Structure
(dict) --
Partitions (list) --
A list of the requested partitions.
(dict) --
Represents a slice of table data.
Values (list) --
The values of the partition.
(string) --
DatabaseName (string) --
The name of the catalog database in which to create the partition.
TableName (string) --
The name of the database table in which to create the partition.
CreationTime (datetime) --
The time at which the partition was created.
LastAccessTime (datetime) --
The last time at which the partition was accessed.
StorageDescriptor (dict) --
Provides information about the physical location where the partition is stored.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
Parameters (dict) --
These key-value pairs define partition parameters.
(string) --
(string) --
LastAnalyzedTime (datetime) --
The last time at which column statistics were computed for this partition.
UnprocessedKeys (list) --
A list of the partition values in the request for which partitions were not returned.
(dict) --
Contains a list of values defining partitions.
Values (list) --
The list of values.
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'UnprocessedKeys': [
{
'Values': [
'string',
]
},
]
}
:returns:
(string) --
"""
pass
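# Minimal usage sketch for batch_get_partition; the partition values are hypothetical,
# and any UnprocessedKeys in the response should be retried by the caller.
def _example_batch_get_partition():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    response = glue.batch_get_partition(
        DatabaseName='example_db',
        TableName='example_table',
        PartitionsToGet=[{'Values': ['2021', '01']}, {'Values': ['2021', '02']}],
    )
    for partition in response.get('Partitions', []):
        print(partition['Values'], partition['StorageDescriptor']['Location'])
    if response.get('UnprocessedKeys'):
        print('Retry these keys:', response['UnprocessedKeys'])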
def batch_get_triggers(TriggerNames=None):
"""
Returns a list of resource metadata for a given list of trigger names. After calling the ListTriggers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_triggers(
TriggerNames=[
'string',
]
)
:type TriggerNames: list
:param TriggerNames: [REQUIRED]\nA list of trigger names, which may be the names returned from the ListTriggers operation.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'TriggersNotFound': [
'string',
]
}
Response Structure
(dict) --
Triggers (list) --A list of trigger definitions.
(dict) --Information about a specific trigger.
Name (string) --The name of the trigger.
WorkflowName (string) --The name of the workflow associated with the trigger.
Id (string) --Reserved for future use.
Type (string) --The type of trigger that this is.
State (string) --The current state of the trigger.
Description (string) --A description of this trigger.
Schedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --The actions initiated by this trigger.
(dict) --Defines an action to be initiated by a trigger.
JobName (string) --The name of a job to be executed.
Arguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --The name of the crawler to be used with this action.
Predicate (dict) --The predicate of this trigger, which defines when it will fire.
Logical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --A list of the conditions that determine when the trigger will fire.
(dict) --Defines a condition under which a trigger fires.
LogicalOperator (string) --A logical operator.
JobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --The name of the crawler to which this condition applies.
CrawlState (string) --The state of the crawler to which this condition applies.
TriggersNotFound (list) --A list of names of triggers not found.
(string) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'TriggersNotFound': [
'string',
]
}
:returns:
(string) --
(string) --
"""
pass
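# Minimal usage sketch for batch_get_triggers, following the ListTriggers ->
# BatchGetTriggers flow described above.
def _example_batch_get_triggers():
    import boto3  # assumes boto3 is installed and AWS credentials are configured
    glue = boto3.client('glue')
    names = glue.list_triggers().get('TriggerNames', [])
    if not names:
        return
    response = glue.batch_get_triggers(TriggerNames=names)
    for trigger in response.get('Triggers', []):
        print(trigger['Name'], trigger['Type'], trigger.get('State'))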
def batch_get_workflows(Names=None, IncludeGraph=None):
"""
Returns a list of resource metadata for a given list of workflow names. After calling the ListWorkflows operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_get_workflows(
Names=[
'string',
],
IncludeGraph=True|False
)
:type Names: list
:param Names: [REQUIRED]\nA list of workflow names, which may be the names returned from the ListWorkflows operation.\n\n(string) --\n\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include a graph when returning the workflow resource metadata.
:rtype: dict
Returns
Response Syntax
{
'Workflows': [
{
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'MissingWorkflows': [
'string',
]
}
Response Structure
(dict) --
Workflows (list) --
A list of workflow resource metadata.
(dict) --
A workflow represents a flow in which AWS Glue components should be executed to complete a logical task.
Name (string) --
The name of the workflow representing the flow.
Description (string) --
A description of the workflow.
DefaultRunProperties (dict) --
A collection of properties to be used as part of each execution of the workflow.
(string) --
(string) --
CreatedOn (datetime) --
The date and time when the workflow was created.
LastModifiedOn (datetime) --
The date and time when the workflow was last modified.
LastRun (dict) --
The information about the last execution of the workflow.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component (such as a Trigger or Job) that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X, and 149 for G.2X.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component like Trigger, Job etc. which is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
Optional when only one condition is listed; required when multiple conditions are listed.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X, and 149 for G.2X.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
MissingWorkflows (list) --
A list of names of workflows not found.
(string) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'Workflows': [
{
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'MissingWorkflows': [
'string',
]
}
:returns:
(string) --
(string) --
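A minimal usage sketch for walking this response, assuming it comes from the batch_get_workflows operation (the call that returns Workflows and MissingWorkflows); the workflow name is an illustrative placeholder:
import boto3
glue = boto3.client('glue')
resp = glue.batch_get_workflows(Names=['example-workflow'], IncludeGraph=True)
for wf in resp.get('Workflows', []):
    graph = wf.get('Graph', {})
    for node in graph.get('Nodes', []):
        # Each node is a trigger, job, or crawler in the workflow graph
        print(node['Type'], node['Name'], node['UniqueId'])
    for edge in graph.get('Edges', []):
        print(edge['SourceId'], '->', edge['DestinationId'])
print('Not found:', resp.get('MissingWorkflows', []))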
"""
pass
def batch_stop_job_run(JobName=None, JobRunIds=None):
"""
Stops one or more job runs for a specified job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.batch_stop_job_run(
JobName='string',
JobRunIds=[
'string',
]
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition for which to stop job runs.\n
:type JobRunIds: list
:param JobRunIds: [REQUIRED]\nA list of the JobRunIds that should be stopped for that job definition.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'SuccessfulSubmissions': [
{
'JobName': 'string',
'JobRunId': 'string'
},
],
'Errors': [
{
'JobName': 'string',
'JobRunId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
Response Structure
(dict) --
SuccessfulSubmissions (list) --
A list of the JobRuns that were successfully submitted for stopping.
(dict) --
Records a successful request to stop a specified JobRun .
JobName (string) --
The name of the job definition used in the job run that was stopped.
JobRunId (string) --
The JobRunId of the job run that was stopped.
Errors (list) --
A list of the errors that were encountered in trying to stop JobRuns , including the JobRunId for which each error was encountered and details about the error.
(dict) --
Records an error that occurred when attempting to stop a specified job run.
JobName (string) --
The name of the job definition that is used in the job run in question.
JobRunId (string) --
The JobRunId of the job run in question.
ErrorDetail (dict) --
Specifies details about the error that was encountered.
ErrorCode (string) --
The code associated with this error.
ErrorMessage (string) --
A message describing the error.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'SuccessfulSubmissions': [
{
'JobName': 'string',
'JobRunId': 'string'
},
],
'Errors': [
{
'JobName': 'string',
'JobRunId': 'string',
'ErrorDetail': {
'ErrorCode': 'string',
'ErrorMessage': 'string'
}
},
]
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
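A minimal usage sketch; the job name and run ID are illustrative placeholders:
import boto3
glue = boto3.client('glue')
resp = glue.batch_stop_job_run(JobName='example-job', JobRunIds=['jr_0123abcd'])
for ok in resp.get('SuccessfulSubmissions', []):
    print('stop submitted for', ok['JobRunId'])
for err in resp.get('Errors', []):
    # Inspect per-run failures rather than assuming all runs were stopped
    print('could not stop', err['JobRunId'], err['ErrorDetail']['ErrorMessage'])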
"""
pass
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
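A short sketch of the intended usage pattern, assuming the Glue get_jobs operation (which supports pagination):
import boto3
glue = boto3.client('glue')
if glue.can_paginate('get_jobs'):
    for page in glue.get_paginator('get_jobs').paginate():
        for job in page.get('Jobs', []):
            print(job['Name'])
else:
    # Fall back to a single, unpaginated call
    print(glue.get_jobs())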
"""
pass
def cancel_ml_task_run(TransformId=None, TaskRunId=None):
"""
Cancels (stops) a task run. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can cancel a machine learning task run at any time by calling CancelMLTaskRun with a task run\'s parent transform\'s TransformID and the task run\'s TaskRunId .
See also: AWS API Documentation
Exceptions
:example: response = client.cancel_ml_task_run(
TransformId='string',
TaskRunId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type TaskRunId: string
:param TaskRunId: [REQUIRED]\nA unique identifier for the task run.\n
:rtype: dict
ReturnsResponse Syntax
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'
}
Response Structure
(dict) --
TransformId (string) --
The unique identifier of the machine learning transform.
TaskRunId (string) --
The unique identifier for the task run.
Status (string) --
The status for this run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
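A minimal usage sketch; both identifiers are illustrative placeholders:
import boto3
glue = boto3.client('glue')
resp = glue.cancel_ml_task_run(TransformId='tfm-0123abcd', TaskRunId='tsk-0123abcd')
# The returned Status reflects the task run after the cancel request is accepted
print(resp['Status'])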
"""
pass
def create_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None, CsvClassifier=None):
"""
Creates a classifier in the user\'s account. This can be a GrokClassifier , an XMLClassifier , a JsonClassifier , or a CsvClassifier , depending on which field of the request is present.
See also: AWS API Documentation
Exceptions
:example: response = client.create_classifier(
GrokClassifier={
'Classification': 'string',
'Name': 'string',
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
XMLClassifier={
'Classification': 'string',
'Name': 'string',
'RowTag': 'string'
},
JsonClassifier={
'Name': 'string',
'JsonPath': 'string'
},
CsvClassifier={
'Name': 'string',
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
)
:type GrokClassifier: dict
:param GrokClassifier: A GrokClassifier object specifying the classifier to create.\n\nClassification (string) -- [REQUIRED]An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\n\nName (string) -- [REQUIRED]The name of the new classifier.\n\nGrokPattern (string) -- [REQUIRED]The grok pattern used by this classifier.\n\nCustomPatterns (string) --Optional custom grok patterns used by this classifier.\n\n\n
:type XMLClassifier: dict
:param XMLClassifier: An XMLClassifier object specifying the classifier to create.\n\nClassification (string) -- [REQUIRED]An identifier of the data format that the classifier matches.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nRowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This can\'t identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a='A' item_b='B'></row> is okay, but <row item_a='A' item_b='B' /> is not).\n\n\n
:type JsonClassifier: dict
:param JsonClassifier: A JsonClassifier object specifying the classifier to create.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nJsonPath (string) -- [REQUIRED]A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\n\n\n
:type CsvClassifier: dict
:param CsvClassifier: A CsvClassifier object specifying the classifier to create.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nDelimiter (string) --A custom symbol to denote what separates each column entry in the row.\n\nQuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. Must be different from the column delimiter.\n\nContainsHeader (string) --Indicates whether the CSV file contains a header.\n\nHeader (list) --A list of strings representing column names.\n\n(string) --\n\n\nDisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true.\n\nAllowSingleColumn (boolean) --Enables the processing of files that contain only one column.\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
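A minimal sketch that registers a CSV classifier; the classifier name and header columns are illustrative placeholders:
import boto3
glue = boto3.client('glue')
glue.create_classifier(
    CsvClassifier={
        'Name': 'example-csv-classifier',
        'Delimiter': ',',
        'QuoteSymbol': '"',
        'ContainsHeader': 'PRESENT',
        'Header': ['id', 'name', 'value'],
    }
)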
"""
pass
def create_connection(CatalogId=None, ConnectionInput=None):
"""
Creates a connection definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_connection(
CatalogId='string',
ConnectionInput={
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the connection. If none is provided, the AWS account ID is used by default.
:type ConnectionInput: dict
:param ConnectionInput: [REQUIRED]\nA ConnectionInput object defining the connection to create.\n\nName (string) -- [REQUIRED]The name of the connection.\n\nDescription (string) --The description of the connection.\n\nConnectionType (string) -- [REQUIRED]The type of the connection. Currently, these types are supported:\n\nJDBC - Designates a connection to a database through Java Database Connectivity (JDBC).\nKAFKA - Designates a connection to an Apache Kafka streaming platform.\nMONGODB - Designates a connection to a MongoDB document database.\n\nSFTP is not supported.\n\nMatchCriteria (list) --A list of criteria that can be used in selecting this connection.\n\n(string) --\n\n\nConnectionProperties (dict) -- [REQUIRED]These key-value pairs define parameters for the connection.\n\n(string) --\n(string) --\n\n\n\n\nPhysicalConnectionRequirements (dict) --A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to successfully make this connection.\n\nSubnetId (string) --The subnet ID used by the connection.\n\nSecurityGroupIdList (list) --The security group ID list used by the connection.\n\n(string) --\n\n\nAvailabilityZone (string) --The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
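A minimal sketch that creates a JDBC connection. The connection name, URL, and credentials are illustrative placeholders, and the JDBC_CONNECTION_URL/USERNAME/PASSWORD keys are the commonly used connection properties for JDBC; adjust them to your data store:
import boto3
glue = boto3.client('glue')
glue.create_connection(
    ConnectionInput={
        'Name': 'example-jdbc-connection',
        'ConnectionType': 'JDBC',
        'ConnectionProperties': {
            'JDBC_CONNECTION_URL': 'jdbc:postgresql://db.example.com:5432/mydb',
            'USERNAME': 'example_user',
            'PASSWORD': 'example_password',
        },
    }
)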
"""
pass
def create_crawler(Name=None, Role=None, DatabaseName=None, Description=None, Targets=None, Schedule=None, Classifiers=None, TablePrefix=None, SchemaChangePolicy=None, Configuration=None, CrawlerSecurityConfiguration=None, Tags=None):
"""
Creates a new crawler with specified targets, role, configuration, and optional schedule. At least one crawl target must be specified, in the S3Targets field, the JdbcTargets field, or the DynamoDBTargets field.
See also: AWS API Documentation
Exceptions
:example: response = client.create_crawler(
Name='string',
Role='string',
DatabaseName='string',
Description='string',
Targets={
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
Schedule='string',
Classifiers=[
'string',
],
TablePrefix='string',
SchemaChangePolicy={
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
Configuration='string',
CrawlerSecurityConfiguration='string',
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nName of the new crawler.\n
:type Role: string
:param Role: [REQUIRED]\nThe IAM role or Amazon Resource Name (ARN) of an IAM role used by the new crawler to access customer resources.\n
:type DatabaseName: string
:param DatabaseName: The AWS Glue database where results are written, such as: arn:aws:daylight:us-east-1::database/sometable/* .
:type Description: string
:param Description: A description of the new crawler.
:type Targets: dict
:param Targets: [REQUIRED]\nA list of collection of targets to crawl.\n\nS3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.\n\n(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).\n\nPath (string) --The path to the Amazon S3 target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nJdbcTargets (list) --Specifies JDBC targets.\n\n(dict) --Specifies a JDBC data store to crawl.\n\nConnectionName (string) --The name of the connection to use to connect to the JDBC target.\n\nPath (string) --The path of the JDBC target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nDynamoDBTargets (list) --Specifies Amazon DynamoDB targets.\n\n(dict) --Specifies an Amazon DynamoDB table to crawl.\n\nPath (string) --The name of the DynamoDB table to crawl.\n\n\n\n\n\nCatalogTargets (list) --Specifies AWS Glue Data Catalog targets.\n\n(dict) --Specifies an AWS Glue Data Catalog target.\n\nDatabaseName (string) -- [REQUIRED]The name of the database to be synchronized.\n\nTables (list) -- [REQUIRED]A list of the tables to be synchronized.\n\n(string) --\n\n\n\n\n\n\n\n
:type Schedule: string
:param Schedule: A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
:type Classifiers: list
:param Classifiers: A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.\n\n(string) --\n\n
:type TablePrefix: string
:param TablePrefix: The table prefix used for catalog tables that are created.
:type SchemaChangePolicy: dict
:param SchemaChangePolicy: The policy for the crawler\'s update and deletion behavior.\n\nUpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\n\nDeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\n\n\n
:type Configuration: string
:param Configuration: The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
:type CrawlerSecurityConfiguration: string
:param CrawlerSecurityConfiguration: The name of the SecurityConfiguration structure to be used by this crawler.
:type Tags: dict
:param Tags: The tags to use with this crawler request. You can use tags to limit access to the crawler. For more information, see AWS Tags in AWS Glue .\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
:return: {}
:returns:
(dict) --
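A minimal sketch that creates a crawler over a single Amazon S3 path; the crawler name, role, database, and path are illustrative placeholders:
import boto3
glue = boto3.client('glue')
glue.create_crawler(
    Name='example-crawler',
    Role='AWSGlueServiceRole-example',              # IAM role the crawler assumes
    DatabaseName='example_db',                      # catalog database for discovered tables
    Targets={'S3Targets': [{'Path': 's3://example-bucket/raw/'}]},
    Schedule='cron(15 12 * * ? *)',                 # optional: daily at 12:15 UTC
)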
"""
pass
def create_database(CatalogId=None, DatabaseInput=None):
"""
Creates a new database in a Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_database(
CatalogId='string',
DatabaseInput={
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the database. If none is provided, the AWS account ID is used by default.
:type DatabaseInput: dict
:param DatabaseInput: [REQUIRED]\nThe metadata for the database.\n\nName (string) -- [REQUIRED]The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the database.\n\nLocationUri (string) --The location of the database (for example, an HDFS path).\n\nParameters (dict) --These key-value pairs define parameters and properties of the database.\nThese key-value pairs define parameters and properties of the database.\n\n(string) --\n(string) --\n\n\n\n\nCreateTableDefaultPermissions (list) --Creates a set of default permissions on the table for principals.\n\n(dict) --Permissions granted to a principal.\n\nPrincipal (dict) --The principal who is granted permissions.\n\nDataLakePrincipalIdentifier (string) --An identifier for the AWS Lake Formation principal.\n\n\n\nPermissions (list) --The permissions that are granted to the principal.\n\n(string) --\n\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
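A minimal usage sketch; the database name and description are illustrative placeholders:
import boto3
glue = boto3.client('glue')
glue.create_database(
    DatabaseInput={
        'Name': 'example_db',
        'Description': 'Example catalog database',
    }
)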
"""
pass
def create_dev_endpoint(EndpointName=None, RoleArn=None, SecurityGroupIds=None, SubnetId=None, PublicKey=None, PublicKeys=None, NumberOfNodes=None, WorkerType=None, GlueVersion=None, NumberOfWorkers=None, ExtraPythonLibsS3Path=None, ExtraJarsS3Path=None, SecurityConfiguration=None, Tags=None, Arguments=None):
"""
Creates a new development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.create_dev_endpoint(
EndpointName='string',
RoleArn='string',
SecurityGroupIds=[
'string',
],
SubnetId='string',
PublicKey='string',
PublicKeys=[
'string',
],
NumberOfNodes=123,
WorkerType='Standard'|'G.1X'|'G.2X',
GlueVersion='string',
NumberOfWorkers=123,
ExtraPythonLibsS3Path='string',
ExtraJarsS3Path='string',
SecurityConfiguration='string',
Tags={
'string': 'string'
},
Arguments={
'string': 'string'
}
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nThe name to be assigned to the new DevEndpoint .\n
:type RoleArn: string
:param RoleArn: [REQUIRED]\nThe IAM role for the DevEndpoint .\n
:type SecurityGroupIds: list
:param SecurityGroupIds: Security group IDs for the security groups to be used by the new DevEndpoint .\n\n(string) --\n\n
:type SubnetId: string
:param SubnetId: The subnet ID for the new DevEndpoint to use.
:type PublicKey: string
:param PublicKey: The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
:type PublicKeys: list
:param PublicKeys: A list of public keys to be used by the development endpoints for authentication. The use of this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.\n\nNote\nIf you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.\n\n\n(string) --\n\n
:type NumberOfNodes: integer
:param NumberOfNodes: The number of AWS Glue Data Processing Units (DPUs) to allocate to this DevEndpoint .
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\nKnown issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.\n
:type GlueVersion: string
:param GlueVersion: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated to the development endpoint.\nThe maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .\n
:type ExtraPythonLibsS3Path: string
:param ExtraPythonLibsS3Path: The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\n\nNote\nYou can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not yet supported.\n\n
:type ExtraJarsS3Path: string
:param ExtraJarsS3Path: The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
:type SecurityConfiguration: string
:param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this DevEndpoint .
:type Tags: dict
:param Tags: The tags to use with this DevEndpoint. You may use tags to limit access to the DevEndpoint. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type Arguments: dict
:param Arguments: A map of arguments used to configure the DevEndpoint .\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'EndpointName': 'string',
'Status': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'RoleArn': 'string',
'YarnEndpointAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'NumberOfNodes': 123,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'SecurityConfiguration': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'Arguments': {
'string': 'string'
}
}
Response Structure
(dict) --
EndpointName (string) --
The name assigned to the new DevEndpoint .
Status (string) --
The current status of the new DevEndpoint .
SecurityGroupIds (list) --
The security groups assigned to the new DevEndpoint .
(string) --
SubnetId (string) --
The subnet ID assigned to the new DevEndpoint .
RoleArn (string) --
The Amazon Resource Name (ARN) of the role assigned to the new DevEndpoint .
YarnEndpointAddress (string) --
The address of the YARN endpoint used by this DevEndpoint .
ZeppelinRemoteSparkInterpreterPort (integer) --
The Apache Zeppelin port for the remote Apache Spark interpreter.
NumberOfNodes (integer) --
The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint.
WorkerType (string) --
The type of predefined worker that is allocated to the development endpoint. May be a value of Standard, G.1X, or G.2X.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated to the development endpoint.
AvailabilityZone (string) --
The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --
The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --
The paths to one or more Python libraries in an S3 bucket that will be loaded in your DevEndpoint .
ExtraJarsS3Path (string) --
Path to one or more Java .jar files in an S3 bucket that will be loaded in your DevEndpoint .
FailureReason (string) --
The reason for a current failure in this DevEndpoint .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure being used with this DevEndpoint .
CreatedTimestamp (datetime) --
The point in time at which this DevEndpoint was created.
Arguments (dict) --
The map of arguments used to configure this DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ValidationException
Glue.Client.exceptions.ResourceNumberLimitExceededException
:return: {
'EndpointName': 'string',
'Status': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'RoleArn': 'string',
'YarnEndpointAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'NumberOfNodes': 123,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'SecurityConfiguration': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'Arguments': {
'string': 'string'
}
}
:returns:
(string) --
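A minimal sketch that requests a small development endpoint. The endpoint name and role ARN are illustrative placeholders, and the GlueVersion, worker sizing, and Python-version argument are assumptions to adjust for your account:
import boto3
glue = boto3.client('glue')
resp = glue.create_dev_endpoint(
    EndpointName='example-dev-endpoint',
    RoleArn='arn:aws:iam::123456789012:role/AWSGlueServiceRole-example',
    GlueVersion='1.0',
    WorkerType='G.1X',
    NumberOfWorkers=2,
    Arguments={'GLUE_PYTHON_VERSION': '3'},
)
print(resp['Status'])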
"""
pass
def create_job(Name=None, Description=None, LogUri=None, Role=None, ExecutionProperty=None, Command=None, DefaultArguments=None, NonOverridableArguments=None, Connections=None, MaxRetries=None, AllocatedCapacity=None, Timeout=None, MaxCapacity=None, SecurityConfiguration=None, Tags=None, NotificationProperty=None, GlueVersion=None, NumberOfWorkers=None, WorkerType=None):
"""
Creates a new job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.create_job(
Name='string',
Description='string',
LogUri='string',
Role='string',
ExecutionProperty={
'MaxConcurrentRuns': 123
},
Command={
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
DefaultArguments={
'string': 'string'
},
NonOverridableArguments={
'string': 'string'
},
Connections={
'Connections': [
'string',
]
},
MaxRetries=123,
AllocatedCapacity=123,
Timeout=123,
MaxCapacity=123.0,
SecurityConfiguration='string',
Tags={
'string': 'string'
},
NotificationProperty={
'NotifyDelayAfter': 123
},
GlueVersion='string',
NumberOfWorkers=123,
WorkerType='Standard'|'G.1X'|'G.2X'
)
:type Name: string
:param Name: [REQUIRED]\nThe name you assign to this job definition. It must be unique in your account.\n
:type Description: string
:param Description: Description of the job being defined.
:type LogUri: string
:param LogUri: This field is reserved for future use.
:type Role: string
:param Role: [REQUIRED]\nThe name or Amazon Resource Name (ARN) of the IAM role associated with this job.\n
:type ExecutionProperty: dict
:param ExecutionProperty: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\n\nMaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\n\n\n
:type Command: dict
:param Command: [REQUIRED]\nThe JobCommand that executes this job.\n\nName (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .\n\nScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\n\nPythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.\n\n\n
:type DefaultArguments: dict
:param DefaultArguments: The default arguments for this job.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type NonOverridableArguments: dict
:param NonOverridableArguments: Non-overridable arguments for this job, specified as name-value pairs.\n\n(string) --\n(string) --\n\n\n\n
:type Connections: dict
:param Connections: The connections used for this job.\n\nConnections (list) --A list of connections used by the job.\n\n(string) --\n\n\n\n
:type MaxRetries: integer
:param MaxRetries: The maximum number of times to retry this job if it fails.
:type AllocatedCapacity: integer
:param AllocatedCapacity: This parameter is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) to allocate to this Job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n
:type Timeout: integer
:param Timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n
:type SecurityConfiguration: string
:param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this job.
:type Tags: dict
:param Tags: The tags to use with this job. You may use tags to limit access to the job. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type NotificationProperty: dict
:param NotificationProperty: Specifies configuration properties of a job notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n
:type GlueVersion: string
:param GlueVersion: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\nJobs that are created without specifying a Glue version default to Glue 0.9.\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The unique name that was provided for this job definition.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
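A minimal sketch that defines a Spark ETL (glueetl) job. It sets WorkerType and NumberOfWorkers instead of MaxCapacity, following the mutual-exclusion rule described above; the job name, role, script path, and --TempDir value are illustrative placeholders:
import boto3
glue = boto3.client('glue')
resp = glue.create_job(
    Name='example-etl-job',
    Role='AWSGlueServiceRole-example',
    Command={
        'Name': 'glueetl',
        'ScriptLocation': 's3://example-bucket/scripts/job.py',
        'PythonVersion': '3',
    },
    GlueVersion='1.0',
    WorkerType='G.1X',
    NumberOfWorkers=2,          # do not also set MaxCapacity
    Timeout=60,                 # minutes
    DefaultArguments={'--TempDir': 's3://example-bucket/tmp/'},
)
print(resp['Name'])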
"""
pass
def create_ml_transform(Name=None, Description=None, InputRecordTables=None, Parameters=None, Role=None, GlueVersion=None, MaxCapacity=None, WorkerType=None, NumberOfWorkers=None, Timeout=None, MaxRetries=None, Tags=None):
"""
Creates an AWS Glue machine learning transform. This operation creates the transform and all the necessary parameters to train it.
Call this operation as the first step in the process of using a machine learning transform (such as the FindMatches transform) for deduplicating data. You can provide an optional Description , in addition to the parameters that you want to use for your algorithm.
You must also specify certain parameters for the tasks that AWS Glue runs on your behalf as part of learning from your data and creating a high-quality machine learning transform. These parameters include Role , and optionally, AllocatedCapacity , Timeout , and MaxRetries . For more information, see Jobs .
See also: AWS API Documentation
Exceptions
:example: response = client.create_ml_transform(
Name='string',
Description='string',
InputRecordTables=[
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
Parameters={
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
Role='string',
GlueVersion='string',
MaxCapacity=123.0,
WorkerType='Standard'|'G.1X'|'G.2X',
NumberOfWorkers=123,
Timeout=123,
MaxRetries=123,
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nThe unique name that you give the transform when you create it.\n
:type Description: string
:param Description: A description of the machine learning transform that is being defined. The default is an empty string.
:type InputRecordTables: list
:param InputRecordTables: [REQUIRED]\nA list of AWS Glue table definitions used by the transform.\n\n(dict) --The database and table in the AWS Glue Data Catalog that is used for input or output data.\n\nDatabaseName (string) -- [REQUIRED]A database name in the AWS Glue Data Catalog.\n\nTableName (string) -- [REQUIRED]A table name in the AWS Glue Data Catalog.\n\nCatalogId (string) --A unique identifier for the AWS Glue Data Catalog.\n\nConnectionName (string) --The name of the connection to the AWS Glue Data Catalog.\n\n\n\n\n
:type Parameters: dict
:param Parameters: [REQUIRED]\nThe algorithmic parameters that are specific to the transform type used. Conditionally dependent on the transform type.\n\nTransformType (string) -- [REQUIRED]The type of machine learning transform.\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms .\n\nFindMatchesParameters (dict) --The parameters for the find matches algorithm.\n\nPrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.\n\nPrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.\nThe precision metric indicates how often your model is correct when it predicts a match.\nThe recall metric indicates that for an actual match, how often your model predicts the match.\n\nAccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.\nAccuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.\nCost measures how many compute resources, and thus money, are consumed to run the transform.\n\nEnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.\nNote that setting this value to true may increase the conflation execution time.\n\n\n\n\n
:type Role: string
:param Role: [REQUIRED]\nThe name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both AWS Glue service role permissions to AWS Glue resources, and Amazon S3 permissions required by the transform.\n\nThis role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .\nThis role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.\n\n
:type GlueVersion: string
:param GlueVersion: This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nMaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .\n\nIf either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.\nIf MaxCapacity is set then neither NumberOfWorkers nor WorkerType can be set.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\nMaxCapacity and NumberOfWorkers must both be at least 1.\n\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n\nMaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .\n\nIf either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.\nIf MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\nMaxCapacity and NumberOfWorkers must both be at least 1.\n\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when this task runs.\nIf WorkerType is set, then NumberOfWorkers is required (and vice versa).\n
:type Timeout: integer
:param Timeout: The timeout of the task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
:type MaxRetries: integer
:param MaxRetries: The maximum number of times to retry a task for this transform after a task run fails.
:type Tags: dict
:param Tags: The tags to use with this machine learning transform. You may use tags to limit access to the machine learning transform. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'TransformId': 'string'
}
Response Structure
(dict) --
TransformId (string) --
A unique identifier that is generated for the transform.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.IdempotentParameterMismatchException
:return: {
'TransformId': 'string'
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.IdempotentParameterMismatchException
"""
pass
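# Hedged usage sketch (not part of the generated stubs): illustrates the capacity rules
# documented above for create_ml_transform -- set either MaxCapacity, or WorkerType together
# with NumberOfWorkers, never both. The transform name, catalog entries, and role ARN are
# illustrative assumptions, and a real boto3 Glue client is expected.
def _example_create_ml_transform(glue_client):
    return glue_client.create_ml_transform(
        Name='find-matches-demo',
        InputRecordTables=[{'DatabaseName': 'demo_db', 'TableName': 'customers'}],
        Parameters={
            'TransformType': 'FIND_MATCHES',
            'FindMatchesParameters': {'PrimaryKeyColumnName': 'customer_id'},
        },
        Role='arn:aws:iam::123456789012:role/GlueDemoRole',  # placeholder role ARN
        GlueVersion='1.0',
        WorkerType='G.1X',   # when WorkerType is set, NumberOfWorkers is required...
        NumberOfWorkers=5,   # ...and MaxCapacity must be omitted
        Timeout=120,
        MaxRetries=1,
    )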
def create_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionInput=None):
"""
Creates a new partition.
See also: AWS API Documentation
Exceptions
:example: response = client.create_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionInput={
'Values': [
'string',
],
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
)
:type CatalogId: string
:param CatalogId: The AWS account ID of the catalog in which the partition is to be created.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the metadata database in which the partition is to be created.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the metadata table in which the partition is to be created.\n
:type PartitionInput: dict
:param PartitionInput: [REQUIRED]\nA PartitionInput structure defining the partition to be created.\n\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\n\n(string) --\n\n\nLastAccessTime (datetime) --The last time at which the partition was accessed.\n\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nParameters (dict) --These key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
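# Hedged usage sketch: the partition Values must be ordered exactly like the table's
# partition keys, as noted in the PartitionInput description above. The database, table,
# S3 location, and Hive format/SerDe class names are illustrative assumptions.
def _example_create_partition(glue_client):
    return glue_client.create_partition(
        DatabaseName='demo_db',
        TableName='events',
        PartitionInput={
            'Values': ['2020', '01', '15'],  # must follow the year/month/day key order
            'StorageDescriptor': {
                'Columns': [{'Name': 'event_id', 'Type': 'string'}],
                'Location': 's3://demo-bucket/events/year=2020/month=01/day=15/',
                'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
                'OutputFormat': 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat',
                'SerdeInfo': {
                    'SerializationLibrary': 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',
                },
            },
        },
    )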
def create_script(DagNodes=None, DagEdges=None, Language=None):
"""
Transforms a directed acyclic graph (DAG) into code.
See also: AWS API Documentation
Exceptions
:example: response = client.create_script(
DagNodes=[
{
'Id': 'string',
'NodeType': 'string',
'Args': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'LineNumber': 123
},
],
DagEdges=[
{
'Source': 'string',
'Target': 'string',
'TargetParameter': 'string'
},
],
Language='PYTHON'|'SCALA'
)
:type DagNodes: list
:param DagNodes: A list of the nodes in the DAG.\n\n(dict) --Represents a node in a directed acyclic graph (DAG)\n\nId (string) -- [REQUIRED]A node identifier that is unique within the node\'s graph.\n\nNodeType (string) -- [REQUIRED]The type of node that this is.\n\nArgs (list) -- [REQUIRED]Properties of the node, in the form of name-value pairs.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nLineNumber (integer) --The line number of the node.\n\n\n\n\n
:type DagEdges: list
:param DagEdges: A list of the edges in the DAG.\n\n(dict) --Represents a directional edge in a directed acyclic graph (DAG).\n\nSource (string) -- [REQUIRED]The ID of the node at which the edge starts.\n\nTarget (string) -- [REQUIRED]The ID of the node at which the edge ends.\n\nTargetParameter (string) --The target of the edge.\n\n\n\n\n
:type Language: string
:param Language: The programming language of the resulting code from the DAG.
:rtype: dict
Returns
Response Syntax
{
'PythonScript': 'string',
'ScalaCode': 'string'
}
Response Structure
(dict) --
PythonScript (string) --
The Python script generated from the DAG.
ScalaCode (string) --
The Scala code generated from the DAG.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'PythonScript': 'string',
'ScalaCode': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
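# Hedged usage sketch: converts a minimal two-node DAG (a source feeding a sink) into
# generated Python. The node ids, node types, and argument values are illustrative
# assumptions; consult the AWS Glue code-generation docs for the exact node vocabulary.
def _example_create_script(glue_client):
    response = glue_client.create_script(
        DagNodes=[
            {'Id': 'source', 'NodeType': 'DataSource',
             'Args': [{'Name': 'database', 'Value': '"demo_db"'},
                      {'Name': 'table_name', 'Value': '"events"'}]},
            {'Id': 'sink', 'NodeType': 'DataSink',
             'Args': [{'Name': 'path', 'Value': '"s3://demo-bucket/out/"'}]},
        ],
        DagEdges=[{'Source': 'source', 'Target': 'sink'}],
        Language='PYTHON',
    )
    return response.get('PythonScript')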
def create_security_configuration(Name=None, EncryptionConfiguration=None):
"""
Creates a new security configuration. A security configuration is a set of security properties that can be used by AWS Glue. You can use a security configuration to encrypt data at rest. For information about using security configurations in AWS Glue, see Encrypting Data Written by Crawlers, Jobs, and Development Endpoints .
See also: AWS API Documentation
Exceptions
:example: response = client.create_security_configuration(
Name='string',
EncryptionConfiguration={
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name for the new security configuration.\n
:type EncryptionConfiguration: dict
:param EncryptionConfiguration: [REQUIRED]\nThe encryption configuration for the new security configuration.\n\nS3Encryption (list) --The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.\n\n(dict) --Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.\n\nS3EncryptionMode (string) --The encryption mode to use for Amazon S3 data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\n\n\nCloudWatchEncryption (dict) --The encryption configuration for Amazon CloudWatch.\n\nCloudWatchEncryptionMode (string) --The encryption mode to use for CloudWatch data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\nJobBookmarksEncryption (dict) --The encryption configuration for job bookmarks.\n\nJobBookmarksEncryptionMode (string) --The encryption mode to use for job bookmarks data.\n\nKmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string',
'CreatedTimestamp': datetime(2015, 1, 1)
}
Response Structure
(dict) --
Name (string) --
The name assigned to the new security configuration.
CreatedTimestamp (datetime) --
The time at which the new security configuration was created.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
:return: {
'Name': 'string',
'CreatedTimestamp': datetime(2015, 1, 1)
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
"""
pass
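# Hedged usage sketch: a security configuration that applies SSE-KMS to S3 output and
# CloudWatch logs and CSE-KMS to job bookmarks, i.e. the modes enumerated above. The
# configuration name and KMS key ARN are illustrative assumptions.
def _example_create_security_configuration(glue_client):
    kms_key_arn = 'arn:aws:kms:us-east-1:123456789012:key/example-key-id'  # placeholder
    return glue_client.create_security_configuration(
        Name='demo-security-config',
        EncryptionConfiguration={
            'S3Encryption': [{'S3EncryptionMode': 'SSE-KMS', 'KmsKeyArn': kms_key_arn}],
            'CloudWatchEncryption': {'CloudWatchEncryptionMode': 'SSE-KMS',
                                     'KmsKeyArn': kms_key_arn},
            'JobBookmarksEncryption': {'JobBookmarksEncryptionMode': 'CSE-KMS',
                                       'KmsKeyArn': kms_key_arn},
        },
    )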
def create_table(CatalogId=None, DatabaseName=None, TableInput=None):
"""
Creates a new table definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_table(
CatalogId='string',
DatabaseName='string',
TableInput={
'Name': 'string',
'Description': 'string',
'Owner': 'string',
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the Table . If none is supplied, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe catalog database in which to create the new table. For Hive compatibility, this name is entirely lowercase.\n
:type TableInput: dict
:param TableInput: [REQUIRED]\nThe TableInput object that defines the metadata table to create in the catalog.\n\nName (string) -- [REQUIRED]The table name. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the table.\n\nOwner (string) --The table owner.\n\nLastAccessTime (datetime) --The last time that the table was accessed.\n\nLastAnalyzedTime (datetime) --The last time that column statistics were computed for this table.\n\nRetention (integer) --The retention time for this table.\n\nStorageDescriptor (dict) --A storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nPartitionKeys (list) --A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n\n'PartitionKeys': []\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --If the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --If the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --These key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
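# Hedged usage sketch: registers an unpartitioned external CSV table, passing the explicit
# empty PartitionKeys list that the TableInput notes above call out for Athena. Database,
# table, S3 location, and Hive format/SerDe class names are illustrative assumptions.
def _example_create_table(glue_client):
    return glue_client.create_table(
        DatabaseName='demo_db',
        TableInput={
            'Name': 'customers',
            'TableType': 'EXTERNAL_TABLE',
            'PartitionKeys': [],  # explicitly empty for an unpartitioned Athena table
            'StorageDescriptor': {
                'Columns': [{'Name': 'customer_id', 'Type': 'string'},
                            {'Name': 'signup_date', 'Type': 'date'}],
                'Location': 's3://demo-bucket/customers/',
                'InputFormat': 'org.apache.hadoop.mapred.TextInputFormat',
                'OutputFormat': 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat',
                'SerdeInfo': {
                    'SerializationLibrary': 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',
                    'Parameters': {'field.delim': ','},
                },
            },
        },
    )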
def create_trigger(Name=None, WorkflowName=None, Type=None, Schedule=None, Predicate=None, Actions=None, Description=None, StartOnCreation=None, Tags=None):
"""
Creates a new trigger.
See also: AWS API Documentation
Exceptions
:example: response = client.create_trigger(
Name='string',
WorkflowName='string',
Type='SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
Schedule='string',
Predicate={
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
},
Actions=[
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
Description='string',
StartOnCreation=True|False,
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger.\n
:type WorkflowName: string
:param WorkflowName: The name of the workflow associated with the trigger.
:type Type: string
:param Type: [REQUIRED]\nThe type of the new trigger.\n
:type Schedule: string
:param Schedule: A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\nThis field is required when the trigger type is SCHEDULED.\n
:type Predicate: dict
:param Predicate: A predicate to specify when the new trigger should fire.\nThis field is required when the trigger type is CONDITIONAL .\n\nLogical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --A list of the conditions that determine when the trigger will fire.\n\n(dict) --Defines a condition under which a trigger fires.\n\nLogicalOperator (string) --A logical operator.\n\nJobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --The name of the crawler to which this condition applies.\n\nCrawlState (string) --The state of the crawler to which this condition applies.\n\n\n\n\n\n\n
:type Actions: list
:param Actions: [REQUIRED]\nThe actions initiated by this trigger when it fires.\n\n(dict) --Defines an action to be initiated by a trigger.\n\nJobName (string) --The name of a job to be executed.\n\nArguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --The name of the crawler to be used with this action.\n\n\n\n\n
:type Description: string
:param Description: A description of the new trigger.
:type StartOnCreation: boolean
:param StartOnCreation: Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers.
:type Tags: dict
:param Tags: The tags to use with this trigger. You may use tags to limit access to the trigger. For more information about tags in AWS Glue, see AWS Tags in AWS Glue in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The name of the trigger.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.IdempotentParameterMismatchException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
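# Hedged usage sketch: a CONDITIONAL trigger that starts one job once another succeeds,
# showing the Predicate that the parameter notes above require for this trigger type
# (a SCHEDULED trigger would pass a cron Schedule instead). Job and trigger names are
# illustrative assumptions.
def _example_create_trigger(glue_client):
    return glue_client.create_trigger(
        Name='start-load-after-extract',
        Type='CONDITIONAL',
        Predicate={
            'Conditions': [{'LogicalOperator': 'EQUALS',
                            'JobName': 'extract-job',
                            'State': 'SUCCEEDED'}],
        },
        Actions=[{'JobName': 'load-job'}],
        StartOnCreation=True,
    )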
def create_user_defined_function(CatalogId=None, DatabaseName=None, FunctionInput=None):
"""
Creates a new function definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.create_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionInput={
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which to create the function. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which to create the function.\n
:type FunctionInput: dict
:param FunctionInput: [REQUIRED]\nA FunctionInput object that defines the function to create in the Data Catalog.\n\nFunctionName (string) --The name of the function.\n\nClassName (string) --The Java class that contains the function code.\n\nOwnerName (string) --The owner of the function.\n\nOwnerType (string) --The owner type.\n\nResourceUris (list) --The resource URIs for the function.\n\n(dict) --The URIs for function resources.\n\nResourceType (string) --The type of the resource.\n\nUri (string) --The URI for accessing the resource.\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
def create_workflow(Name=None, Description=None, DefaultRunProperties=None, Tags=None):
"""
Creates a new workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.create_workflow(
Name='string',
Description='string',
DefaultRunProperties={
'string': 'string'
},
Tags={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name to be assigned to the workflow. It should be unique within your account.\n
:type Description: string
:param Description: A description of the workflow.
:type DefaultRunProperties: dict
:param DefaultRunProperties: A collection of properties to be used as part of each execution of the workflow.\n\n(string) --\n(string) --\n\n\n\n
:type Tags: dict
:param Tags: The tags to be used with this workflow.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The name of the workflow which was provided as part of the request.
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
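# Hedged usage sketch: creates a workflow whose DefaultRunProperties are visible to every
# run, then attaches an on-demand trigger to it via WorkflowName. The workflow, trigger,
# and job names plus the property keys are illustrative assumptions.
def _example_create_workflow(glue_client):
    glue_client.create_workflow(
        Name='nightly-etl',
        Description='Nightly extract/load pipeline',
        DefaultRunProperties={'target_bucket': 's3://demo-bucket/out/'},
        Tags={'team': 'data-eng'},
    )
    return glue_client.create_trigger(
        Name='nightly-etl-start',
        WorkflowName='nightly-etl',
        Type='ON_DEMAND',        # ON_DEMAND triggers cannot use StartOnCreation=True
        Actions=[{'JobName': 'extract-job'}],
    )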
def delete_classifier(Name=None):
"""
Removes a classifier from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_classifier(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the classifier to remove.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def delete_connection(CatalogId=None, ConnectionName=None):
"""
Deletes a connection from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_connection(
CatalogId='string',
ConnectionName='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.
:type ConnectionName: string
:param ConnectionName: [REQUIRED]\nThe name of the connection to delete.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_crawler(Name=None):
"""
Removes a specified crawler from the AWS Glue Data Catalog, unless the crawler state is RUNNING .
See also: AWS API Documentation
Exceptions
:example: response = client.delete_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the crawler to remove.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def delete_database(CatalogId=None, Name=None):
"""
Removes a specified database from a Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_database(
CatalogId='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the database resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the database to delete. For Hive compatibility, this must be all lowercase.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_dev_endpoint(EndpointName=None):
"""
Deletes a specified development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_dev_endpoint(
EndpointName='string'
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nThe name of the DevEndpoint .\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
"""
pass
def delete_job(JobName=None):
"""
Deletes a specified job definition. If the job definition is not found, no exception is thrown.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_job(
JobName='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to delete.\n
:rtype: dict
Returns
Response Syntax
{
'JobName': 'string'
}
Response Structure
(dict) --
JobName (string) --The name of the job definition that was deleted.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobName': 'string'
}
"""
pass
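# Hedged usage sketch: delete_job does not raise when the job definition is missing, so it
# can be called unconditionally during cleanup. The job name is an illustrative assumption.
def _example_delete_job(glue_client):
    response = glue_client.delete_job(JobName='extract-job')
    # The response echoes the name of the job definition that was deleted, if any.
    return response.get('JobName')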
def delete_ml_transform(TransformId=None):
"""
Deletes an AWS Glue machine learning transform. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. If you no longer need a transform, you can delete it by calling DeleteMLTransform . However, any AWS Glue jobs that still reference the deleted transform will no longer succeed.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_ml_transform(
TransformId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the transform to delete.\n
:rtype: dict
Returns
Response Syntax
{
'TransformId': 'string'
}
Response Structure
(dict) --
TransformId (string) --The unique identifier of the transform that was deleted.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string'
}
"""
pass
def delete_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValues=None):
"""
Deletes a specified partition.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionValues=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition to be deleted resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table in question resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table that contains the partition to be deleted.\n
:type PartitionValues: list
:param PartitionValues: [REQUIRED]\nThe values that define the partition.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_resource_policy(PolicyHashCondition=None):
"""
Deletes a specified policy.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_resource_policy(
PolicyHashCondition='string'
)
:type PolicyHashCondition: string
:param PolicyHashCondition: The hash value returned when this policy was set.
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
"""
pass
def delete_security_configuration(Name=None):
"""
Deletes a specified security configuration.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_security_configuration(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the security configuration to delete.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def delete_table(CatalogId=None, DatabaseName=None, Name=None):
"""
Removes a table definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_table(
CatalogId='string',
DatabaseName='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type Name: string
:param Name: [REQUIRED]\nThe name of the table to be deleted. For Hive compatibility, this name is entirely lowercase.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionId=None):
"""
Deletes a specified version of a table.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_table_version(
CatalogId='string',
DatabaseName='string',
TableName='string',
VersionId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type VersionId: string
:param VersionId: [REQUIRED]\nThe ID of the table version to be deleted. A VersionID is a string representation of an integer. Each version is incremented by 1.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
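# Hedged usage sketch: VersionId is passed as the string form of the integer version
# number, as noted above. The catalog database and table names are illustrative assumptions.
def _example_delete_table_version(glue_client):
    return glue_client.delete_table_version(
        DatabaseName='demo_db',
        TableName='customers',
        VersionId='2',  # string representation of the integer version to drop
    )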
def delete_trigger(Name=None):
"""
Deletes a specified trigger. If the trigger is not found, no exception is thrown.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to delete.\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --The name of the trigger that was deleted.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
"""
pass
def delete_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None):
"""
Deletes an existing function definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionName='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the function to be deleted is located. If none is supplied, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the function is located.\n
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the function definition to be deleted.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def delete_workflow(Name=None):
"""
Deletes a workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_workflow(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow to be deleted.\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --Name of the workflow specified in input.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
"""
pass
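# Hedged usage sketch: generate_presigned_url is the generic botocore helper, so
# ClientMethod names a Glue operation and Params carries that operation's arguments.
# The job name is an illustrative assumption; 3600 seconds is the documented default
# expiry made explicit.
def _example_generate_presigned_url(glue_client):
    return glue_client.generate_presigned_url(
        ClientMethod='get_job',
        Params={'JobName': 'extract-job'},
        ExpiresIn=3600,
    )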
def get_catalog_import_status(CatalogId=None):
"""
Retrieves the status of a migration operation.
See also: AWS API Documentation
Exceptions
:example: response = client.get_catalog_import_status(
CatalogId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the catalog to migrate. Currently, this should be the AWS account ID.
:rtype: dict
Returns
Response Syntax
{
'ImportStatus': {
'ImportCompleted': True|False,
'ImportTime': datetime(2015, 1, 1),
'ImportedBy': 'string'
}
}
Response Structure
(dict) --
ImportStatus (dict) --The status of the specified catalog migration.
ImportCompleted (boolean) --
True if the migration has completed, or False otherwise.
ImportTime (datetime) --The time that the migration was started.
ImportedBy (string) --The name of the person who initiated the migration.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'ImportStatus': {
'ImportCompleted': True|False,
'ImportTime': datetime(2015, 1, 1),
'ImportedBy': 'string'
}
}
"""
pass
def get_classifier(Name=None):
"""
Retrieve a classifier by name.
See also: AWS API Documentation
Exceptions
:example: response = client.get_classifier(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the classifier to retrieve.\n
:rtype: dict
Returns
Response Syntax
{
'Classifier': {
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
}
}
Response Structure
(dict) --
Classifier (dict) --The requested classifier.
GrokClassifier (dict) --A classifier that uses grok .
Name (string) --The name of the classifier.
Classification (string) --An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, and so on.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
GrokPattern (string) --The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers .
CustomPatterns (string) --Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers .
XMLClassifier (dict) --A classifier for XML content.
Name (string) --The name of the classifier.
Classification (string) --An identifier of the data format that the classifier matches.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
RowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This can\'t identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not).
JsonClassifier (dict) --A classifier for JSON content.
Name (string) --The name of the classifier.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
JsonPath (string) --A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .
CsvClassifier (dict) --A classifier for comma-separated values (CSV).
Name (string) --The name of the classifier.
CreationTime (datetime) --The time that this classifier was registered.
LastUpdated (datetime) --The time that this classifier was last updated.
Version (integer) --The version of this classifier.
Delimiter (string) --A custom symbol to denote what separates each column entry in the row.
QuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.
ContainsHeader (string) --Indicates whether the CSV file contains a header.
Header (list) --A list of strings representing column names.
(string) --
DisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true .
AllowSingleColumn (boolean) --Enables the processing of files that contain only one column.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Classifier': {
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def get_classifiers(MaxResults=None, NextToken=None):
"""
Lists all classifier objects in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_classifiers(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The size of the list to return (optional).
:type NextToken: string
:param NextToken: An optional continuation token.
:rtype: dict
Returns
Response Syntax
{
'Classifiers': [
{
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Classifiers (list) --
The requested list of classifier objects.
(dict) --
Classifiers are triggered during a crawl task. A classifier checks whether a given file is in a format it can handle. If it is, the classifier creates a schema in the form of a StructType object that matches that data format.
You can use the standard classifiers that AWS Glue provides, or you can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier can be a grok classifier, an XML classifier, a JSON classifier, or a custom CSV classifier, as specified in one of the fields in the Classifier object.
GrokClassifier (dict) --
A classifier that uses grok .
Name (string) --
The name of the classifier.
Classification (string) --
An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, and so on.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
GrokPattern (string) --
The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in Writing Custom Classifiers .
CustomPatterns (string) --
Optional custom grok patterns defined by this classifier. For more information, see custom patterns in Writing Custom Classifiers .
XMLClassifier (dict) --
A classifier for XML content.
Name (string) --
The name of the classifier.
Classification (string) --
An identifier of the data format that the classifier matches.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
RowTag (string) --
The XML tag designating the element that contains each record in an XML document being parsed. This can\'t identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row> is okay, but <row item_a="A" item_b="B" /> is not).
JsonClassifier (dict) --
A classifier for JSON content.
Name (string) --
The name of the classifier.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
JsonPath (string) --
A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .
CsvClassifier (dict) --
A classifier for comma-separated values (CSV).
Name (string) --
The name of the classifier.
CreationTime (datetime) --
The time that this classifier was registered.
LastUpdated (datetime) --
The time that this classifier was last updated.
Version (integer) --
The version of this classifier.
Delimiter (string) --
A custom symbol to denote what separates each column entry in the row.
QuoteSymbol (string) --
A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.
ContainsHeader (string) --
Indicates whether the CSV file contains a header.
Header (list) --
A list of strings representing column names.
(string) --
DisableValueTrimming (boolean) --
Specifies not to trim values before identifying the type of column values. The default value is true .
AllowSingleColumn (boolean) --
Enables the processing of files that contain only one column.
NextToken (string) --
A continuation token.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Classifiers': [
{
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
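# Hedged usage sketch (illustrative only, not part of the generated stub): paging
# through all classifiers with a real boto3 Glue client by following NextToken.
# The helper name `_example_list_all_classifiers` is hypothetical.
def _example_list_all_classifiers():
    import boto3  # local import keeps this stub module import-safe
    glue = boto3.client('glue')
    classifiers, token = [], None
    while True:
        kwargs = {'MaxResults': 50}
        if token:
            kwargs['NextToken'] = token
        resp = glue.get_classifiers(**kwargs)
        classifiers.extend(resp.get('Classifiers', []))
        token = resp.get('NextToken')
        if not token:
            break
    return classifiers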
def get_connection(CatalogId=None, Name=None, HidePassword=None):
"""
Retrieves a connection definition from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_connection(
CatalogId='string',
Name='string',
HidePassword=True|False
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the connection definition to retrieve.\n
:type HidePassword: boolean
:param HidePassword: Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but it does have permission to access the rest of the connection properties.
:rtype: dict
ReturnsResponse Syntax
{
'Connection': {
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
}
}
Response Structure
(dict) --
Connection (dict) --
The requested connection definition.
Name (string) --
The name of the connection definition.
Description (string) --
The description of the connection.
ConnectionType (string) --
The type of the connection. Currently, only JDBC is supported; SFTP is not supported.
MatchCriteria (list) --
A list of criteria that can be used in selecting this connection.
(string) --
ConnectionProperties (dict) --
These key-value pairs define parameters for the connection:
HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME - The name under which to log in to the database. The value string for USER_NAME is "USERNAME ".
PASSWORD - A password, if one is used, for the user name.
ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.
JDBC_ENGINE - The name of the JDBC engine to use.
JDBC_ENGINE_VERSION - The version of the JDBC engine to use.
CONFIG_FILES - (Reserved for future use.)
INSTANCE_ID - The instance ID to use.
JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. AWS Glue uses this root certificate to validate the customer's certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64-encoded PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false . AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip AWS Glue's validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN ; in Microsoft SQL Server, this is used as the hostNameInCertificate .
CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.
KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect and bootstrap itself.
(string) --
(string) --
PhysicalConnectionRequirements (dict) --
A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to make this connection successfully.
SubnetId (string) --
The subnet ID used by the connection.
SecurityGroupIdList (list) --
The security group ID list used by the connection.
(string) --
AvailabilityZone (string) --
The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.
CreationTime (datetime) --
The time that this connection definition was created.
LastUpdatedTime (datetime) --
The last time that this connection definition was updated.
LastUpdatedBy (string) --
The user, group, or role that last updated this connection definition.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Connection': {
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
}
}
:returns:
(string) --
"""
pass
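# Hedged usage sketch (illustrative only): fetching a single connection without
# its password and reading the JDBC URL property. The connection name
# 'my-jdbc-connection' is a hypothetical placeholder.
def _example_describe_connection():
    import boto3
    glue = boto3.client('glue')
    resp = glue.get_connection(Name='my-jdbc-connection', HidePassword=True)
    conn = resp['Connection']
    props = conn.get('ConnectionProperties', {})
    return conn['ConnectionType'], props.get('JDBC_CONNECTION_URL')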
def get_connections(CatalogId=None, Filter=None, HidePassword=None, NextToken=None, MaxResults=None):
"""
Retrieves a list of connection definitions from the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_connections(
CatalogId='string',
Filter={
'MatchCriteria': [
'string',
],
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA'
},
HidePassword=True|False,
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connections reside. If none is provided, the AWS account ID is used by default.
:type Filter: dict
:param Filter: A filter that controls which connections are returned.\n\nMatchCriteria (list) --A criteria string that must match the criteria recorded in the connection definition for that connection definition to be returned.\n\n(string) --\n\n\nConnectionType (string) --The type of connections to return. Currently, only JDBC is supported; SFTP is not supported.\n\n\n
:type HidePassword: boolean
:param HidePassword: Allows you to retrieve the connection metadata without returning the password. For instance, the AWS Glue console uses this flag to retrieve the connection, and does not display the password. Set this parameter when the caller might not have permission to use the AWS KMS key to decrypt the password, but it does have permission to access the rest of the connection properties.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of connections to return in one response.
:rtype: dict
ReturnsResponse Syntax
{
'ConnectionList': [
{
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
ConnectionList (list) --
A list of requested connection definitions.
(dict) --
Defines a connection to a data source.
Name (string) --
The name of the connection definition.
Description (string) --
The description of the connection.
ConnectionType (string) --
The type of the connection. Currently, only JDBC is supported; SFTP is not supported.
MatchCriteria (list) --
A list of criteria that can be used in selecting this connection.
(string) --
ConnectionProperties (dict) --
These key-value pairs define parameters for the connection:
HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME - The name under which to log in to the database. The value string for USER_NAME is "USERNAME ".
PASSWORD - A password, if one is used, for the user name.
ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.
JDBC_ENGINE - The name of the JDBC engine to use.
JDBC_ENGINE_VERSION - The version of the JDBC engine to use.
CONFIG_FILES - (Reserved for future use.)
INSTANCE_ID - The instance ID to use.
JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. AWS Glue uses this root certificate to validate the customer's certificate when connecting to the customer database. AWS Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64-encoded PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false . AWS Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip AWS Glue's validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN ; in Microsoft SQL Server, this is used as the hostNameInCertificate .
CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.
KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect and bootstrap itself.
(string) --
(string) --
PhysicalConnectionRequirements (dict) --
A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to make this connection successfully.
SubnetId (string) --
The subnet ID used by the connection.
SecurityGroupIdList (list) --
The security group ID list used by the connection.
(string) --
AvailabilityZone (string) --
The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.
CreationTime (datetime) --
The time that this connection definition was created.
LastUpdatedTime (datetime) --
The last time that this connection definition was updated.
LastUpdatedBy (string) --
The user, group, or role that last updated this connection definition.
NextToken (string) --
A continuation token, if the list of connections returned does not include the last of the filtered connections.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'ConnectionList': [
{
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
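# Hedged usage sketch (illustrative only): listing only JDBC connections by
# passing a Filter, following NextToken until the list is exhausted.
def _example_list_jdbc_connections():
    import boto3
    glue = boto3.client('glue')
    names, token = [], None
    while True:
        kwargs = {'Filter': {'ConnectionType': 'JDBC'}, 'HidePassword': True}
        if token:
            kwargs['NextToken'] = token
        resp = glue.get_connections(**kwargs)
        names.extend(c['Name'] for c in resp.get('ConnectionList', []))
        token = resp.get('NextToken')
        if not token:
            break
    return names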
def get_crawler(Name=None):
"""
Retrieves metadata for a specified crawler.
See also: AWS API Documentation
Exceptions
:example: response = client.get_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the crawler to retrieve metadata for.\n
:rtype: dict
ReturnsResponse Syntax{
'Crawler': {
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
}
}
Response Structure
(dict) --
Crawler (dict) --The metadata for the specified crawler.
Name (string) --The name of the crawler.
Role (string) --The Amazon Resource Name (ARN) of an IAM role that\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.
Targets (dict) --A collection of targets to crawl.
S3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.
(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).
Path (string) --The path to the Amazon S3 target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
JdbcTargets (list) --Specifies JDBC targets.
(dict) --Specifies a JDBC data store to crawl.
ConnectionName (string) --The name of the connection to use to connect to the JDBC target.
Path (string) --The path of the JDBC target.
Exclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
DynamoDBTargets (list) --Specifies Amazon DynamoDB targets.
(dict) --Specifies an Amazon DynamoDB table to crawl.
Path (string) --The name of the DynamoDB table to crawl.
CatalogTargets (list) --Specifies AWS Glue Data Catalog targets.
(dict) --Specifies an AWS Glue Data Catalog target.
DatabaseName (string) --The name of the database to be synchronized.
Tables (list) --A list of the tables to be synchronized.
(string) --
DatabaseName (string) --The name of the database in which the crawler\'s output is stored.
Description (string) --A description of the crawler.
Classifiers (list) --A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.
(string) --
SchemaChangePolicy (dict) --The policy that specifies update and delete behaviors for the crawler.
UpdateBehavior (string) --The update behavior when the crawler finds a changed schema.
DeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.
State (string) --Indicates whether the crawler is running, or whether a run is pending.
TablePrefix (string) --The prefix added to the names of tables that are created.
Schedule (dict) --For scheduled crawlers, the schedule when the crawler runs.
ScheduleExpression (string) --A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
State (string) --The state of the schedule.
CrawlElapsedTime (integer) --If the crawler is running, contains the total time elapsed since the last crawl began.
CreationTime (datetime) --The time that the crawler was created.
LastUpdated (datetime) --The time that the crawler was last updated.
LastCrawl (dict) --The status of the last crawl, and potentially error information if an error occurred.
Status (string) --Status of the last crawl.
ErrorMessage (string) --If an error occurred, the error information about the last crawl.
LogGroup (string) --The log group for the last crawl.
LogStream (string) --The log stream for the last crawl.
MessagePrefix (string) --The prefix for a message about this crawl.
StartTime (datetime) --The time at which the crawl started.
Version (integer) --The version of the crawler.
Configuration (string) --Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
CrawlerSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used by this crawler.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Crawler': {
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
}
}
:returns:
(string) --
"""
pass
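# Hedged usage sketch (illustrative only): reading a crawler's current state and
# the status of its last crawl. The crawler name 'my-crawler' is hypothetical.
def _example_crawler_status():
    import boto3
    glue = boto3.client('glue')
    crawler = glue.get_crawler(Name='my-crawler')['Crawler']
    last = crawler.get('LastCrawl', {})
    return crawler['State'], last.get('Status'), last.get('ErrorMessage')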
def get_crawler_metrics(CrawlerNameList=None, MaxResults=None, NextToken=None):
"""
Retrieves metrics about specified crawlers.
See also: AWS API Documentation
Exceptions
:example: response = client.get_crawler_metrics(
CrawlerNameList=[
'string',
],
MaxResults=123,
NextToken='string'
)
:type CrawlerNameList: list
:param CrawlerNameList: A list of the names of crawlers about which to retrieve metrics.\n\n(string) --\n\n
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:rtype: dict
ReturnsResponse Syntax
{
'CrawlerMetricsList': [
{
'CrawlerName': 'string',
'TimeLeftSeconds': 123.0,
'StillEstimating': True|False,
'LastRuntimeSeconds': 123.0,
'MedianRuntimeSeconds': 123.0,
'TablesCreated': 123,
'TablesUpdated': 123,
'TablesDeleted': 123
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
CrawlerMetricsList (list) --
A list of metrics for the specified crawler.
(dict) --
Metrics for a specified crawler.
CrawlerName (string) --
The name of the crawler.
TimeLeftSeconds (float) --
The estimated time left to complete a running crawl.
StillEstimating (boolean) --
True if the crawler is still estimating how long it will take to complete this run.
LastRuntimeSeconds (float) --
The duration of the crawler\'s most recent run, in seconds.
MedianRuntimeSeconds (float) --
The median duration of this crawler\'s runs, in seconds.
TablesCreated (integer) --
The number of tables created by this crawler.
TablesUpdated (integer) --
The number of tables updated by this crawler.
TablesDeleted (integer) --
The number of tables deleted by this crawler.
NextToken (string) --
A continuation token, if the returned list does not contain the last metric available.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'CrawlerMetricsList': [
{
'CrawlerName': 'string',
'TimeLeftSeconds': 123.0,
'StillEstimating': True|False,
'LastRuntimeSeconds': 123.0,
'MedianRuntimeSeconds': 123.0,
'TablesCreated': 123,
'TablesUpdated': 123,
'TablesDeleted': 123
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.OperationTimeoutException
"""
pass
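# Hedged usage sketch (illustrative only): pulling run metrics for a couple of
# crawlers and summarising them. The crawler names are hypothetical placeholders.
def _example_crawler_metrics():
    import boto3
    glue = boto3.client('glue')
    resp = glue.get_crawler_metrics(CrawlerNameList=['my-crawler', 'other-crawler'])
    return {
        m['CrawlerName']: (m.get('LastRuntimeSeconds'), m.get('TablesCreated'))
        for m in resp.get('CrawlerMetricsList', [])
    }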
def get_crawlers(MaxResults=None, NextToken=None):
"""
Retrieves metadata for all crawlers defined in the customer account.
See also: AWS API Documentation
Exceptions
:example: response = client.get_crawlers(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The number of crawlers to return on each call.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:rtype: dict
ReturnsResponse Syntax
{
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Crawlers (list) --
A list of crawler metadata.
(dict) --
Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the AWS Glue Data Catalog.
Name (string) --
The name of the crawler.
Role (string) --
The Amazon Resource Name (ARN) of an IAM role that\'s used to access customer resources, such as Amazon Simple Storage Service (Amazon S3) data.
Targets (dict) --
A collection of targets to crawl.
S3Targets (list) --
Specifies Amazon Simple Storage Service (Amazon S3) targets.
(dict) --
Specifies a data store in Amazon Simple Storage Service (Amazon S3).
Path (string) --
The path to the Amazon S3 target.
Exclusions (list) --
A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
JdbcTargets (list) --
Specifies JDBC targets.
(dict) --
Specifies a JDBC data store to crawl.
ConnectionName (string) --
The name of the connection to use to connect to the JDBC target.
Path (string) --
The path of the JDBC target.
Exclusions (list) --
A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .
(string) --
DynamoDBTargets (list) --
Specifies Amazon DynamoDB targets.
(dict) --
Specifies an Amazon DynamoDB table to crawl.
Path (string) --
The name of the DynamoDB table to crawl.
CatalogTargets (list) --
Specifies AWS Glue Data Catalog targets.
(dict) --
Specifies an AWS Glue Data Catalog target.
DatabaseName (string) --
The name of the database to be synchronized.
Tables (list) --
A list of the tables to be synchronized.
(string) --
DatabaseName (string) --
The name of the database in which the crawler\'s output is stored.
Description (string) --
A description of the crawler.
Classifiers (list) --
A list of UTF-8 strings that specify the custom classifiers that are associated with the crawler.
(string) --
SchemaChangePolicy (dict) --
The policy that specifies update and delete behaviors for the crawler.
UpdateBehavior (string) --
The update behavior when the crawler finds a changed schema.
DeleteBehavior (string) --
The deletion behavior when the crawler finds a deleted object.
State (string) --
Indicates whether the crawler is running, or whether a run is pending.
TablePrefix (string) --
The prefix added to the names of tables that are created.
Schedule (dict) --
For scheduled crawlers, the schedule when the crawler runs.
ScheduleExpression (string) --
A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
State (string) --
The state of the schedule.
CrawlElapsedTime (integer) --
If the crawler is running, contains the total time elapsed since the last crawl began.
CreationTime (datetime) --
The time that the crawler was created.
LastUpdated (datetime) --
The time that the crawler was last updated.
LastCrawl (dict) --
The status of the last crawl, and potentially error information if an error occurred.
Status (string) --
Status of the last crawl.
ErrorMessage (string) --
If an error occurred, the error information about the last crawl.
LogGroup (string) --
The log group for the last crawl.
LogStream (string) --
The log stream for the last crawl.
MessagePrefix (string) --
The prefix for a message about this crawl.
StartTime (datetime) --
The time at which the crawl started.
Version (integer) --
The version of the crawler.
Configuration (string) --
Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
CrawlerSecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used by this crawler.
NextToken (string) --
A continuation token, if the returned list has not reached the end of those defined in this customer account.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Crawlers': [
{
'Name': 'string',
'Role': 'string',
'Targets': {
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
'DatabaseName': 'string',
'Description': 'string',
'Classifiers': [
'string',
],
'SchemaChangePolicy': {
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
'State': 'READY'|'RUNNING'|'STOPPING',
'TablePrefix': 'string',
'Schedule': {
'ScheduleExpression': 'string',
'State': 'SCHEDULED'|'NOT_SCHEDULED'|'TRANSITIONING'
},
'CrawlElapsedTime': 123,
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'LastCrawl': {
'Status': 'SUCCEEDED'|'CANCELLED'|'FAILED',
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string',
'MessagePrefix': 'string',
'StartTime': datetime(2015, 1, 1)
},
'Version': 123,
'Configuration': 'string',
'CrawlerSecurityConfiguration': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
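# Hedged usage sketch (illustrative only): if the boto3 Glue client exposes a
# paginator for this operation (it does in recent botocore releases, to the best
# of my knowledge), it removes the manual NextToken loop shown above.
def _example_list_all_crawlers():
    import boto3
    glue = boto3.client('glue')
    paginator = glue.get_paginator('get_crawlers')
    names = []
    for page in paginator.paginate():
        names.extend(c['Name'] for c in page.get('Crawlers', []))
    return names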
def get_data_catalog_encryption_settings(CatalogId=None):
"""
Retrieves the security configuration for a specified catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_data_catalog_encryption_settings(
CatalogId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog to retrieve the security configuration for. If none is provided, the AWS account ID is used by default.
:rtype: dict
ReturnsResponse Syntax{
'DataCatalogEncryptionSettings': {
'EncryptionAtRest': {
'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',
'SseAwsKmsKeyId': 'string'
},
'ConnectionPasswordEncryption': {
'ReturnConnectionPasswordEncrypted': True|False,
'AwsKmsKeyId': 'string'
}
}
}
Response Structure
(dict) --
DataCatalogEncryptionSettings (dict) --The requested security configuration.
EncryptionAtRest (dict) --Specifies the encryption-at-rest configuration for the Data Catalog.
CatalogEncryptionMode (string) --The encryption-at-rest mode for encrypting Data Catalog data.
SseAwsKmsKeyId (string) --The ID of the AWS KMS key to use for encryption at rest.
ConnectionPasswordEncryption (dict) --When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.
ReturnConnectionPasswordEncrypted (boolean) --When the ReturnConnectionPasswordEncrypted flag is set to "true", passwords remain encrypted in the responses of GetConnection and GetConnections . This encryption takes effect independently from catalog encryption.
AwsKmsKeyId (string) --An AWS KMS key that is used to encrypt the connection password.
If connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog.
You can set the decrypt permission to enable or restrict access on the password key according to your security requirements.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'DataCatalogEncryptionSettings': {
'EncryptionAtRest': {
'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',
'SseAwsKmsKeyId': 'string'
},
'ConnectionPasswordEncryption': {
'ReturnConnectionPasswordEncrypted': True|False,
'AwsKmsKeyId': 'string'
}
}
}
"""
pass
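# Hedged usage sketch (illustrative only): checking whether encryption at rest
# and connection-password encryption are enabled for the account's Data Catalog.
def _example_catalog_encryption_enabled():
    import boto3
    glue = boto3.client('glue')
    settings = glue.get_data_catalog_encryption_settings()['DataCatalogEncryptionSettings']
    at_rest = settings.get('EncryptionAtRest', {}).get('CatalogEncryptionMode') == 'SSE-KMS'
    pw_encrypted = settings.get('ConnectionPasswordEncryption', {}).get(
        'ReturnConnectionPasswordEncrypted', False)
    return at_rest, pw_encrypted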
def get_database(CatalogId=None, Name=None):
"""
Retrieves the definition of a specified database.
See also: AWS API Documentation
Exceptions
:example: response = client.get_database(
CatalogId='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the database resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the database to retrieve. For Hive compatibility, this should be all lowercase.\n
:rtype: dict
ReturnsResponse Syntax
{
'Database': {
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
}
Response Structure
(dict) --
Database (dict) --
The definition of the specified database in the Data Catalog.
Name (string) --
The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.
Description (string) --
A description of the database.
LocationUri (string) --
The location of the database (for example, an HDFS path).
Parameters (dict) --
These key-value pairs define parameters and properties of the database.
(string) --
(string) --
CreateTime (datetime) --
The time at which the metadata database was created in the catalog.
CreateTableDefaultPermissions (list) --
Creates a set of default permissions on the table for principals.
(dict) --
Permissions granted to a principal.
Principal (dict) --
The principal who is granted permissions.
DataLakePrincipalIdentifier (string) --
An identifier for the AWS Lake Formation principal.
Permissions (list) --
The permissions that are granted to the principal.
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Database': {
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
}
:returns:
(string) --
(string) --
"""
pass
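# Hedged usage sketch (illustrative only): looking up one database and treating
# a missing entry as "not found" via the modeled exception class that boto3
# attaches to the client. The database name 'sales_db' is hypothetical.
def _example_database_location(name='sales_db'):
    import boto3
    glue = boto3.client('glue')
    try:
        db = glue.get_database(Name=name)['Database']
    except glue.exceptions.EntityNotFoundException:
        return None
    return db.get('LocationUri')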
def get_databases(CatalogId=None, NextToken=None, MaxResults=None):
"""
Retrieves all databases defined in a given Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.get_databases(
CatalogId='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog from which to retrieve Databases . If none is provided, the AWS account ID is used by default.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of databases to return in one response.
:rtype: dict
ReturnsResponse Syntax
{
'DatabaseList': [
{
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
DatabaseList (list) --
A list of Database objects from the specified catalog.
(dict) --
The Database object represents a logical grouping of tables that might reside in a Hive metastore or an RDBMS.
Name (string) --
The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.
Description (string) --
A description of the database.
LocationUri (string) --
The location of the database (for example, an HDFS path).
Parameters (dict) --
These key-value pairs define parameters and properties of the database.
(string) --
(string) --
CreateTime (datetime) --
The time at which the metadata database was created in the catalog.
CreateTableDefaultPermissions (list) --
Creates a set of default permissions on the table for principals.
(dict) --
Permissions granted to a principal.
Principal (dict) --
The principal who is granted permissions.
DataLakePrincipalIdentifier (string) --
An identifier for the AWS Lake Formation principal.
Permissions (list) --
The permissions that are granted to the principal.
(string) --
NextToken (string) --
A continuation token for paginating the returned list of tokens, returned if the current segment of the list is not the last.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'DatabaseList': [
{
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTime': datetime(2015, 1, 1),
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
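# Hedged usage sketch (illustrative only): collecting every database name in the
# catalog with a manual NextToken loop.
def _example_list_database_names():
    import boto3
    glue = boto3.client('glue')
    names, token = [], None
    while True:
        kwargs = {'MaxResults': 100}
        if token:
            kwargs['NextToken'] = token
        resp = glue.get_databases(**kwargs)
        names.extend(d['Name'] for d in resp.get('DatabaseList', []))
        token = resp.get('NextToken')
        if not token:
            break
    return names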
def get_dataflow_graph(PythonScript=None):
"""
Transforms a Python script into a directed acyclic graph (DAG).
See also: AWS API Documentation
Exceptions
:example: response = client.get_dataflow_graph(
PythonScript='string'
)
:type PythonScript: string
:param PythonScript: The Python script to transform.
:rtype: dict
ReturnsResponse Syntax{
'DagNodes': [
{
'Id': 'string',
'NodeType': 'string',
'Args': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'LineNumber': 123
},
],
'DagEdges': [
{
'Source': 'string',
'Target': 'string',
'TargetParameter': 'string'
},
]
}
Response Structure
(dict) --
DagNodes (list) --A list of the nodes in the resulting DAG.
(dict) --Represents a node in a directed acyclic graph (DAG)
Id (string) --A node identifier that is unique within the node\'s graph.
NodeType (string) --The type of node that this is.
Args (list) --Properties of the node, in the form of name-value pairs.
(dict) --An argument or property of a node.
Name (string) --The name of the argument or property.
Value (string) --The value of the argument or property.
Param (boolean) --True if the value is used as a parameter.
LineNumber (integer) --The line number of the node.
DagEdges (list) --A list of the edges in the resulting DAG.
(dict) --Represents a directional edge in a directed acyclic graph (DAG).
Source (string) --The ID of the node at which the edge starts.
Target (string) --The ID of the node at which the edge ends.
TargetParameter (string) --The target of the edge.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'DagNodes': [
{
'Id': 'string',
'NodeType': 'string',
'Args': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'LineNumber': 123
},
],
'DagEdges': [
{
'Source': 'string',
'Target': 'string',
'TargetParameter': 'string'
},
]
}
"""
pass
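# Hedged usage sketch (illustrative only): asking the service to turn an ETL
# script into a DAG and returning (node count, edge count). The script text is
# supplied by the caller; a real script would typically come from GetJob or a
# generated script in Amazon S3.
def _example_script_to_dag(python_script):
    import boto3
    glue = boto3.client('glue')
    resp = glue.get_dataflow_graph(PythonScript=python_script)
    return len(resp.get('DagNodes', [])), len(resp.get('DagEdges', []))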
def get_dev_endpoint(EndpointName=None):
"""
Retrieves information about a specified development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.get_dev_endpoint(
EndpointName='string'
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nName of the DevEndpoint to retrieve information for.\n
:rtype: dict
ReturnsResponse Syntax{
'DevEndpoint': {
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
}
}
Response Structure
(dict) --
DevEndpoint (dict) --A DevEndpoint definition.
EndpointName (string) --The name of the DevEndpoint .
RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .
SecurityGroupIds (list) --A list of security group identifiers used in this DevEndpoint .
(string) --
SubnetId (string) --The subnet ID for this DevEndpoint .
YarnEndpointAddress (string) --The YARN endpoint address used by this DevEndpoint .
PrivateAddress (string) --A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.
ZeppelinRemoteSparkInterpreterPort (integer) --The Apache Zeppelin port for the remote Apache Spark interpreter.
PublicAddress (string) --The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .
Status (string) --The current status of this DevEndpoint .
WorkerType (string) --The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Development endpoints that are created without specifying a Glue version default to Glue 0.9.
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
NumberOfNodes (integer) --The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .
AvailabilityZone (string) --The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.
Note
You can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.
ExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
Note
You can only use pure Java/Scala libraries with a DevEndpoint .
FailureReason (string) --The reason for a current failure in this DevEndpoint .
LastUpdateStatus (string) --The status of the last update.
CreatedTimestamp (datetime) --The point in time at which this DevEndpoint was created.
LastModifiedTimestamp (datetime) --The point in time at which this DevEndpoint was last modified.
PublicKey (string) --The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
PublicKeys (list) --A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.
Note
If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.
(string) --
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this DevEndpoint .
Arguments (dict) --A map of arguments used to configure the DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'DevEndpoint': {
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
}
}
:returns:
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
"""
pass
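# Hedged usage sketch (illustrative only): checking whether a named development
# endpoint is ready and where to reach it. 'my-dev-endpoint' is hypothetical.
def _example_dev_endpoint_address(name='my-dev-endpoint'):
    import boto3
    glue = boto3.client('glue')
    ep = glue.get_dev_endpoint(EndpointName=name)['DevEndpoint']
    address = ep.get('PublicAddress') or ep.get('PrivateAddress')
    return ep.get('Status'), address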
def get_dev_endpoints(MaxResults=None, NextToken=None):
"""
Retrieves all the development endpoints in this AWS account.
See also: AWS API Documentation
Exceptions
:example: response = client.get_dev_endpoints(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The maximum size of information to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:rtype: dict
ReturnsResponse Syntax
{
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
DevEndpoints (list) --
A list of DevEndpoint definitions.
(dict) --
A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.
EndpointName (string) --
The name of the DevEndpoint .
RoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .
SecurityGroupIds (list) --
A list of security group identifiers used in this DevEndpoint .
(string) --
SubnetId (string) --
The subnet ID for this DevEndpoint .
YarnEndpointAddress (string) --
The YARN endpoint address used by this DevEndpoint .
PrivateAddress (string) --
A private IP address to access the DevEndpoint within a VPC if the DevEndpoint is created within one. The PrivateAddress field is present only when you create the DevEndpoint within your VPC.
ZeppelinRemoteSparkInterpreterPort (integer) --
The Apache Zeppelin port for the remote Apache Spark interpreter.
PublicAddress (string) --
The public IP address used by this DevEndpoint . The PublicAddress field is present only when you create a non-virtual private cloud (VPC) DevEndpoint .
Status (string) --
The current status of this DevEndpoint .
WorkerType (string) --
The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Development endpoints that are created without specifying a Glue version default to Glue 0.9.
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
NumberOfNodes (integer) --
The number of AWS Glue Data Processing Units (DPUs) allocated to this DevEndpoint .
AvailabilityZone (string) --
The AWS Availability Zone where this DevEndpoint is located.
VpcId (string) --
The ID of the virtual private cloud (VPC) used by this DevEndpoint .
ExtraPythonLibsS3Path (string) --
The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.
Note
You can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.
ExtraJarsS3Path (string) --
The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .
Note
You can only use pure Java/Scala libraries with a DevEndpoint .
FailureReason (string) --
The reason for a current failure in this DevEndpoint .
LastUpdateStatus (string) --
The status of the last update.
CreatedTimestamp (datetime) --
The point in time at which this DevEndpoint was created.
LastModifiedTimestamp (datetime) --
The point in time at which this DevEndpoint was last modified.
PublicKey (string) --
The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.
PublicKeys (list) --
A list of public keys to be used by the DevEndpoints for authentication. Using this attribute is preferred over a single public key because the public keys allow you to have a different private key per client.
Note
If you previously created an endpoint with a public key, you must remove that key to be able to set a list of public keys. Call the UpdateDevEndpoint API operation with the public key content in the deletePublicKeys attribute, and the list of new keys in the addPublicKeys attribute.
(string) --
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this DevEndpoint .
Arguments (dict) --
A map of arguments used to configure the DevEndpoint .
Valid arguments are:
"--enable-glue-datacatalog": ""
"GLUE_PYTHON_VERSION": "3"
"GLUE_PYTHON_VERSION": "2"
You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.
(string) --
(string) --
NextToken (string) --
A continuation token, if not all DevEndpoint definitions have yet been returned.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'DevEndpoints': [
{
'EndpointName': 'string',
'RoleArn': 'string',
'SecurityGroupIds': [
'string',
],
'SubnetId': 'string',
'YarnEndpointAddress': 'string',
'PrivateAddress': 'string',
'ZeppelinRemoteSparkInterpreterPort': 123,
'PublicAddress': 'string',
'Status': 'string',
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'GlueVersion': 'string',
'NumberOfWorkers': 123,
'NumberOfNodes': 123,
'AvailabilityZone': 'string',
'VpcId': 'string',
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string',
'FailureReason': 'string',
'LastUpdateStatus': 'string',
'CreatedTimestamp': datetime(2015, 1, 1),
'LastModifiedTimestamp': datetime(2015, 1, 1),
'PublicKey': 'string',
'PublicKeys': [
'string',
],
'SecurityConfiguration': 'string',
'Arguments': {
'string': 'string'
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
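# Hedged usage sketch (illustrative only): listing every development endpoint in
# the account, again following NextToken manually.
def _example_list_dev_endpoints():
    import boto3
    glue = boto3.client('glue')
    endpoints, token = [], None
    while True:
        kwargs = {'MaxResults': 25}
        if token:
            kwargs['NextToken'] = token
        resp = glue.get_dev_endpoints(**kwargs)
        endpoints.extend(resp.get('DevEndpoints', []))
        token = resp.get('NextToken')
        if not token:
            break
    return [e['EndpointName'] for e in endpoints]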
def get_job(JobName=None):
"""
Retrieves an existing job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job(
JobName='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to retrieve.\n
:rtype: dict
Returns
Response Syntax
{
'Job': {
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
Response Structure
(dict) --
Job (dict) --The requested job definition.
Name (string) --The name you assign to this job definition.
Description (string) --A description of the job.
LogUri (string) --This field is reserved for future use.
Role (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
CreatedOn (datetime) --The time and date that this job definition was created.
LastModifiedOn (datetime) --The last point in time when this job definition was modified.
ExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
MaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.
Command (dict) --The JobCommand that executes this job.
Name (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .
ScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.
PythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.
DefaultArguments (dict) --The default arguments for this job, specified as name-value pairs.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
NonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.
(string) --
(string) --
Connections (dict) --The connections used for this job.
Connections (list) --A list of connections used by the job.
(string) --
MaxRetries (integer) --The maximum number of times to retry this job after a JobRun fails.
AllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Timeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.
NotificationProperty (dict) --Specifies configuration properties of a job notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
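As a non-authoritative usage sketch (the job name is a placeholder), the fields described above can be read from the returned Job structure like this:
import boto3

glue = boto3.client('glue')

job = glue.get_job(JobName='my-etl-job')['Job']           # placeholder job name

command = job['Command']['Name']                           # 'glueetl' or 'pythonshell'
glue_version = job.get('GlueVersion', '0.9')               # unset versions default to Glue 0.9
if 'WorkerType' in job:
    # Worker-based sizing (WorkerType / NumberOfWorkers).
    print(command, glue_version, job['WorkerType'], job['NumberOfWorkers'])
else:
    # DPU-based sizing (MaxCapacity or the deprecated AllocatedCapacity).
    print(command, glue_version, job.get('MaxCapacity'))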
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Job': {
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
def get_job_bookmark(JobName=None, RunId=None):
"""
Returns information on a job bookmark entry.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job_bookmark(
JobName='string',
RunId='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job in question.\n
:type RunId: string
:param RunId: The unique run identifier associated with this job run.
:rtype: dict
Returns
Response Syntax
{
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
Response Structure
(dict) --
JobBookmarkEntry (dict) --
A structure that defines a point that a job can resume processing.
JobName (string) --
The name of the job in question.
Version (integer) --
The version of the job.
Run (integer) --
The run ID number.
Attempt (integer) --
The attempt ID number.
PreviousRunId (string) --
The unique run identifier associated with the previous job run.
RunId (string) --
The run ID number.
JobBookmark (string) --
The bookmark itself.
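A minimal illustrative sketch follows (the job name is a placeholder); it simply prints the progress counters of the bookmark entry described above:
import boto3

glue = boto3.client('glue')

entry = glue.get_job_bookmark(JobName='my-etl-job')['JobBookmarkEntry']  # placeholder job name
print(entry['Version'], entry['Run'], entry['Attempt'])
print(entry['JobBookmark'])   # the serialized bookmark state itself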
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ValidationException
:return: {
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ValidationException
"""
pass
def get_job_run(JobName=None, RunId=None, PredecessorsIncluded=None):
"""
Retrieves the metadata for a given job run.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job_run(
JobName='string',
RunId='string',
PredecessorsIncluded=True|False
)
:type JobName: string
:param JobName: [REQUIRED]\nName of the job definition being run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the job run.\n
:type PredecessorsIncluded: boolean
:param PredecessorsIncluded: True if a list of predecessor runs should be returned.
:rtype: dict
Returns
Response Syntax
{
'JobRun': {
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
Response Structure
(dict) --
JobRun (dict) --
The requested job-run metadata.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
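The JobRunState enumeration above lends itself to a simple polling loop. The sketch below is illustrative only; the job name and run ID are placeholders, and the terminal-state set is taken from the states listed in the response syntax.
import time
import boto3

glue = boto3.client('glue')

# States after which a run can no longer change, per the JobRunState enum above.
TERMINAL_STATES = {'STOPPED', 'SUCCEEDED', 'FAILED', 'TIMEOUT'}

def wait_for_run(job_name, run_id, poll_seconds=30):
    # Poll GetJobRun until the run reaches a terminal state; return the final JobRun dict.
    while True:
        run = glue.get_job_run(JobName=job_name, RunId=run_id)['JobRun']
        if run['JobRunState'] in TERMINAL_STATES:
            return run
        time.sleep(poll_seconds)

final = wait_for_run('my-etl-job', 'jr_0123456789abcdef')  # placeholder identifiers
print(final['JobRunState'], final.get('ErrorMessage'))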
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobRun': {
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
def get_job_runs(JobName=None, NextToken=None, MaxResults=None):
"""
Retrieves metadata for all runs of a given job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.get_job_runs(
JobName='string',
NextToken='string',
MaxResults=123
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition for which to retrieve all job runs.\n
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum size of the response.
:rtype: dict
Returns
Response Syntax
{
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
JobRuns (list) --
A list of job-run metadata objects.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
NextToken (string) --
A continuation token, if not all requested job runs have been returned.
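As an illustrative sketch (the job name is a placeholder), NextToken can be fed back into the call to walk every page of runs:
import boto3

glue = boto3.client('glue')

runs = []
kwargs = {'JobName': 'my-etl-job'}        # placeholder job name
while True:
    page = glue.get_job_runs(**kwargs)
    runs.extend(page['JobRuns'])
    token = page.get('NextToken')
    if not token:                          # no token means the last page has been returned
        break
    kwargs['NextToken'] = token

failed = [r['Id'] for r in runs if r['JobRunState'] == 'FAILED']
print(len(runs), 'runs,', len(failed), 'failed')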
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
def get_jobs(NextToken=None, MaxResults=None):
"""
Retrieves all current job definitions.
See also: AWS API Documentation
Exceptions
:example: response = client.get_jobs(
NextToken='string',
MaxResults=123
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum size of the response.
:rtype: dict
Returns
Response Syntax
{
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Jobs (list) --
A list of job definitions.
(dict) --
Specifies a job definition.
Name (string) --
The name you assign to this job definition.
Description (string) --
A description of the job.
LogUri (string) --
This field is reserved for future use.
Role (string) --
The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
CreatedOn (datetime) --
The time and date that this job definition was created.
LastModifiedOn (datetime) --
The last point in time when this job definition was modified.
ExecutionProperty (dict) --
An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
MaxConcurrentRuns (integer) --
The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.
Command (dict) --
The JobCommand that executes this job.
Name (string) --
The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .
ScriptLocation (string) --
Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.
PythonVersion (string) --
The Python version being used to execute a Python shell job. Allowed values are 2 or 3.
DefaultArguments (dict) --
The default arguments for this job, specified as name-value pairs.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
NonOverridableArguments (dict) --
Non-overridable arguments for this job, specified as name-value pairs.
(string) --
(string) --
Connections (dict) --
The connections used for this job.
Connections (list) --
A list of connections used by the job.
(string) --
MaxRetries (integer) --
The maximum number of times to retry this job after a JobRun fails.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Timeout (integer) --
The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job.
NotificationProperty (dict) --
Specifies configuration properties of a job notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
NextToken (string) --
A continuation token, if not all job definitions have yet been returned.
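A boto3 paginator can drive the NextToken loop for you. The sketch below assumes the Glue client exposes a 'get_jobs' paginator (true for current botocore releases) and is meant only as an illustration:
import boto3

glue = boto3.client('glue')

# Collect the name, command, and Glue version of every job definition.
paginator = glue.get_paginator('get_jobs')
for page in paginator.paginate():
    for job in page['Jobs']:
        print(job['Name'], job['Command']['Name'], job.get('GlueVersion', '0.9'))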
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Jobs': [
{
'Name': 'string',
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
def get_mapping(Source=None, Sinks=None, Location=None):
"""
Creates mappings from a specified source table to one or more target tables.
See also: AWS API Documentation
Exceptions
:example: response = client.get_mapping(
Source={
'DatabaseName': 'string',
'TableName': 'string'
},
Sinks=[
{
'DatabaseName': 'string',
'TableName': 'string'
},
],
Location={
'Jdbc': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'S3': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'DynamoDB': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
]
}
)
:type Source: dict
:param Source: [REQUIRED]\nSpecifies the source table.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n
:type Sinks: list
:param Sinks: A list of target tables.\n\n(dict) --Specifies a table definition in the AWS Glue Data Catalog.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n\n\n
:type Location: dict
:param Location: Parameters for the mapping.\n\nJdbc (list) --A JDBC location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nS3 (list) --An Amazon Simple Storage Service (Amazon S3) location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nDynamoDB (list) --An Amazon DynamoDB table location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Mapping': [
{
'SourceTable': 'string',
'SourcePath': 'string',
'SourceType': 'string',
'TargetTable': 'string',
'TargetPath': 'string',
'TargetType': 'string'
},
]
}
Response Structure
(dict) --
Mapping (list) --
A list of mappings to the specified targets.
(dict) --
Defines a mapping.
SourceTable (string) --
The name of the source table.
SourcePath (string) --
The source path.
SourceType (string) --
The source type.
TargetTable (string) --
The target table.
TargetPath (string) --
The target path.
TargetType (string) --
The target type.
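A hedged, concrete-looking call is sketched below; the database and table names are placeholders, and only the required Source plus a single sink are shown:
import boto3

glue = boto3.client('glue')

resp = glue.get_mapping(
    Source={'DatabaseName': 'sales_db', 'TableName': 'raw_orders'},        # placeholders
    Sinks=[{'DatabaseName': 'sales_db', 'TableName': 'curated_orders'}],   # placeholders
)
for m in resp['Mapping']:
    print(m['SourcePath'], m['SourceType'], '->', m['TargetPath'], m['TargetType'])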
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {
'Mapping': [
{
'SourceTable': 'string',
'SourcePath': 'string',
'SourceType': 'string',
'TargetTable': 'string',
'TargetPath': 'string',
'TargetType': 'string'
},
]
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
"""
pass
def get_ml_task_run(TransformId=None, TaskRunId=None):
"""
Gets details for a specific task run on a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can check the stats of any task run by calling GetMLTaskRun with the TaskRunID and its parent transform\'s TransformID .
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_task_run(
TransformId='string',
TaskRunId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type TaskRunId: string
:param TaskRunId: [REQUIRED]\nThe unique identifier of the task run.\n
:rtype: dict
Returns
Response Syntax
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
}
Response Structure
(dict) --
TransformId (string) --
The unique identifier of the transform.
TaskRunId (string) --
The unique run identifier associated with this run.
Status (string) --
The status for this task run.
LogGroupName (string) --
The names of the log groups that are associated with the task run.
Properties (dict) --
The list of properties that are associated with the task run.
TaskType (string) --
The type of task run.
ImportLabelsTaskRunProperties (dict) --
The configuration properties for an importing labels task run.
InputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path from where you will import the labels.
Replace (boolean) --
Indicates whether to overwrite your existing labels.
ExportLabelsTaskRunProperties (dict) --
The configuration properties for an exporting labels task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will export the labels.
LabelingSetGenerationTaskRunProperties (dict) --
The configuration properties for a labeling set generation task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will generate the labeling set.
FindMatchesTaskRunProperties (dict) --
The configuration properties for a find matches task run.
JobId (string) --
The job ID for the Find Matches task run.
JobName (string) --
The name assigned to the job for the Find Matches task run.
JobRunId (string) --
The job run ID for the Find Matches task run.
ErrorString (string) --
The error strings that are associated with the task run.
StartedOn (datetime) --
The date and time when this task run started.
LastModifiedOn (datetime) --
The date and time when this task run was last modified.
CompletedOn (datetime) --
The date and time when this task run was completed.
ExecutionTime (integer) --
The amount of time (in seconds) that the task run consumed resources.
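As a short illustrative sketch (both identifiers are placeholders), the status, task type, and any error string can be read straight from the response:
import boto3

glue = boto3.client('glue')

run = glue.get_ml_task_run(
    TransformId='tfm-0123456789abcdef0123456789abcdef',   # placeholder transform ID
    TaskRunId='tsk-0123456789abcdef0123456789abcdef',     # placeholder task run ID
)
print(run['Status'], run['Properties']['TaskType'])
if run['Status'] == 'FAILED':
    print(run.get('ErrorString'))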
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
"""
pass
def get_ml_task_runs(TransformId=None, NextToken=None, MaxResults=None, Filter=None, Sort=None):
"""
Gets a list of runs for a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can get a sortable, filterable list of machine learning task runs by calling GetMLTaskRuns with their parent transform\'s TransformID and other optional parameters as documented in this section.
This operation returns a list of historic runs and must be paginated.
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_task_runs(
TransformId='string',
NextToken='string',
MaxResults=123,
Filter={
'TaskRunType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'StartedBefore': datetime(2015, 1, 1),
'StartedAfter': datetime(2015, 1, 1)
},
Sort={
'Column': 'TASK_RUN_TYPE'|'STATUS'|'STARTED',
'SortDirection': 'DESCENDING'|'ASCENDING'
}
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type NextToken: string
:param NextToken: A token for pagination of the results. The default is empty.
:type MaxResults: integer
:param MaxResults: The maximum number of results to return.
:type Filter: dict
:param Filter: The filter criteria, in the TaskRunFilterCriteria structure, for the task run.\n\nTaskRunType (string) --The type of task run.\n\nStatus (string) --The current status of the task run.\n\nStartedBefore (datetime) --Filter on task runs started before this date.\n\nStartedAfter (datetime) --Filter on task runs started after this date.\n\n\n
:type Sort: dict
:param Sort: The sorting criteria, in the TaskRunSortCriteria structure, for the task run.\n\nColumn (string) -- [REQUIRED]The column to be used to sort the list of task runs for the machine learning transform.\n\nSortDirection (string) -- [REQUIRED]The sort direction to be used to sort the list of task runs for the machine learning transform.\n\n\n
:rtype: dict
Returns
Response Syntax
{
'TaskRuns': [
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
TaskRuns (list) --
A list of task runs that are associated with the transform.
(dict) --
The sampling parameters that are associated with the machine learning transform.
TransformId (string) --
The unique identifier for the transform.
TaskRunId (string) --
The unique identifier for this task run.
Status (string) --
The current status of the requested task run.
LogGroupName (string) --
The name of the log group for secure logging that is associated with this task run.
Properties (dict) --
Specifies configuration properties associated with this task run.
TaskType (string) --
The type of task run.
ImportLabelsTaskRunProperties (dict) --
The configuration properties for an importing labels task run.
InputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path from where you will import the labels.
Replace (boolean) --
Indicates whether to overwrite your existing labels.
ExportLabelsTaskRunProperties (dict) --
The configuration properties for an exporting labels task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will export the labels.
LabelingSetGenerationTaskRunProperties (dict) --
The configuration properties for a labeling set generation task run.
OutputS3Path (string) --
The Amazon Simple Storage Service (Amazon S3) path where you will generate the labeling set.
FindMatchesTaskRunProperties (dict) --
The configuration properties for a find matches task run.
JobId (string) --
The job ID for the Find Matches task run.
JobName (string) --
The name assigned to the job for the Find Matches task run.
JobRunId (string) --
The job run ID for the Find Matches task run.
ErrorString (string) --
The list of error strings associated with this task run.
StartedOn (datetime) --
The date and time that this task run started.
LastModifiedOn (datetime) --
The last point in time that the requested task run was updated.
CompletedOn (datetime) --
The last point in time that the requested task run was completed.
ExecutionTime (integer) --
The amount of time (in seconds) that the task run consumed resources.
NextToken (string) --
A pagination token, if more results are available.
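The Filter and Sort shapes above combine naturally with the pagination token. The following rough sketch (placeholder transform ID) lists failed task runs, newest first:
import boto3

glue = boto3.client('glue')

kwargs = {
    'TransformId': 'tfm-0123456789abcdef0123456789abcdef',   # placeholder transform ID
    'Filter': {'Status': 'FAILED'},
    'Sort': {'Column': 'STARTED', 'SortDirection': 'DESCENDING'},
}
while True:
    page = glue.get_ml_task_runs(**kwargs)
    for task in page['TaskRuns']:
        print(task['TaskRunId'], task['Properties']['TaskType'], task.get('ErrorString'))
    token = page.get('NextToken')
    if not token:                      # no token means all pages have been returned
        break
    kwargs['NextToken'] = token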
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TaskRuns': [
{
'TransformId': 'string',
'TaskRunId': 'string',
'Status': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'LogGroupName': 'string',
'Properties': {
'TaskType': 'EVALUATION'|'LABELING_SET_GENERATION'|'IMPORT_LABELS'|'EXPORT_LABELS'|'FIND_MATCHES',
'ImportLabelsTaskRunProperties': {
'InputS3Path': 'string',
'Replace': True|False
},
'ExportLabelsTaskRunProperties': {
'OutputS3Path': 'string'
},
'LabelingSetGenerationTaskRunProperties': {
'OutputS3Path': 'string'
},
'FindMatchesTaskRunProperties': {
'JobId': 'string',
'JobName': 'string',
'JobRunId': 'string'
}
},
'ErrorString': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ExecutionTime': 123
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
"""
pass
def get_ml_transform(TransformId=None):
"""
Gets an AWS Glue machine learning transform artifact and all its corresponding metadata. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. You can retrieve their metadata by calling GetMLTransform .
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_transform(
TransformId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the transform, generated at the time that the transform was created.\n
:rtype: dict
Returns
Response Syntax
{
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
}
Response Structure
(dict) --
TransformId (string) --The unique identifier of the transform, generated at the time that the transform was created.
Name (string) --The unique name given to the transform when it was created.
Description (string) --A description of the transform.
Status (string) --The last known status of the transform (to indicate whether it can be used or not). One of "NOT_READY", "READY", or "DELETING".
CreatedOn (datetime) --The date and time when the transform was created.
LastModifiedOn (datetime) --The date and time when the transform was last modified.
InputRecordTables (list) --A list of AWS Glue table definitions used by the transform.
(dict) --The database and table in the AWS Glue Data Catalog that is used for input or output data.
DatabaseName (string) --A database name in the AWS Glue Data Catalog.
TableName (string) --A table name in the AWS Glue Data Catalog.
CatalogId (string) --A unique identifier for the AWS Glue Data Catalog.
ConnectionName (string) --The name of the connection to the AWS Glue Data Catalog.
Parameters (dict) --The configuration parameters that are specific to the algorithm used.
TransformType (string) --The type of machine learning transform.
For information about the types of machine learning transforms, see Creating Machine Learning Transforms .
FindMatchesParameters (dict) --The parameters for the find matches algorithm.
PrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.
PrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.
The precision metric indicates how often your model is correct when it predicts a match.
The recall metric indicates that for an actual match, how often your model predicts the match.
AccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.
Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.
Cost measures how many compute resources, and thus money, are consumed to run the transform.
EnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.
Note that setting this value to true may increase the conflation execution time.
EvaluationMetrics (dict) --The latest evaluation metrics.
TransformType (string) --The type of machine learning transform.
FindMatchesMetrics (dict) --The evaluation metrics for the find matches algorithm.
AreaUnderPRCurve (float) --The area under the precision/recall curve (AUPRC) is a single number measuring the overall quality of the transform, that is independent of the choice made for precision vs. recall. Higher values indicate that you have a more attractive precision vs. recall tradeoff.
For more information, see Precision and recall in Wikipedia.
Precision (float) --The precision metric indicates how often your transform is correct when it predicts a match. Specifically, it measures how well the transform finds true positives from the total true positives possible.
For more information, see Precision and recall in Wikipedia.
Recall (float) --The recall metric indicates that for an actual match, how often your transform predicts the match. Specifically, it measures how well the transform finds true positives from the total records in the source data.
For more information, see Precision and recall in Wikipedia.
F1 (float) --The maximum F1 metric indicates the transform\'s accuracy between 0 and 1, where 1 is the best accuracy.
For more information, see F1 score in Wikipedia.
ConfusionMatrix (dict) --The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
For more information, see Confusion matrix in Wikipedia.
NumTruePositives (integer) --The number of matches in the data that the transform correctly found, in the confusion matrix for your transform.
NumFalsePositives (integer) --The number of nonmatches in the data that the transform incorrectly classified as a match, in the confusion matrix for your transform.
NumTrueNegatives (integer) --The number of nonmatches in the data that the transform correctly rejected, in the confusion matrix for your transform.
NumFalseNegatives (integer) --The number of matches in the data that the transform didn\'t find, in the confusion matrix for your transform.
LabelCount (integer) --The number of labels available for this transform.
Schema (list) --The Map<Column, Type> object that represents the schema that this transform accepts. Has an upper bound of 100 columns.
(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.
Name (string) --The name of the column.
DataType (string) --The type of data in the column.
Role (string) --The name or Amazon Resource Name (ARN) of the IAM role with the required permissions.
GlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
MaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
When the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.
WorkerType (string) --The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when this task runs.
Timeout (integer) --The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
MaxRetries (integer) --The maximum number of times to retry a task for this transform after a task run fails.
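To make the metric fields above concrete, here is a brief, non-authoritative sketch (placeholder transform ID) that reads the find-matches evaluation metrics, guarding against a transform that has not been evaluated yet:
import boto3

glue = boto3.client('glue')

t = glue.get_ml_transform(TransformId='tfm-0123456789abcdef0123456789abcdef')  # placeholder
metrics = t.get('EvaluationMetrics', {}).get('FindMatchesMetrics')
if metrics:
    print('AUPRC', metrics['AreaUnderPRCurve'], 'precision', metrics['Precision'],
          'recall', metrics['Recall'], 'F1', metrics['F1'])
    cm = metrics['ConfusionMatrix']
    print('TP', cm['NumTruePositives'], 'FP', cm['NumFalsePositives'],
          'TN', cm['NumTrueNegatives'], 'FN', cm['NumFalseNegatives'])
else:
    print('Transform', t['Name'], 'has no evaluation metrics yet; status:', t['Status'])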
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
"""
pass
def get_ml_transforms(NextToken=None, MaxResults=None, Filter=None, Sort=None):
"""
Gets a sortable, filterable list of existing AWS Glue machine learning transforms. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue, and you can retrieve their metadata by calling GetMLTransforms .
See also: AWS API Documentation
Exceptions
:example: response = client.get_ml_transforms(
NextToken='string',
MaxResults=123,
Filter={
'Name': 'string',
'TransformType': 'FIND_MATCHES',
'Status': 'NOT_READY'|'READY'|'DELETING',
'GlueVersion': 'string',
'CreatedBefore': datetime(2015, 1, 1),
'CreatedAfter': datetime(2015, 1, 1),
'LastModifiedBefore': datetime(2015, 1, 1),
'LastModifiedAfter': datetime(2015, 1, 1),
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
]
},
Sort={
'Column': 'NAME'|'TRANSFORM_TYPE'|'STATUS'|'CREATED'|'LAST_MODIFIED',
'SortDirection': 'DESCENDING'|'ASCENDING'
}
)
:type NextToken: string
:param NextToken: A pagination token used to offset the results.
:type MaxResults: integer
:param MaxResults: The maximum number of results to return.
:type Filter: dict
:param Filter: The filter transformation criteria.\n\nName (string) --A unique transform name that is used to filter the machine learning transforms.\n\nTransformType (string) --The type of machine learning transform that is used to filter the machine learning transforms.\n\nStatus (string) --Filters the list of machine learning transforms by the last known status of the transforms (to indicate whether a transform can be used or not). One of 'NOT_READY', 'READY', or 'DELETING'.\n\nGlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\n\nCreatedBefore (datetime) --The time and date before which the transforms were created.\n\nCreatedAfter (datetime) --The time and date after which the transforms were created.\n\nLastModifiedBefore (datetime) --Filter on transforms last modified before this date.\n\nLastModifiedAfter (datetime) --Filter on transforms last modified after this date.\n\nSchema (list) --Filters on datasets with a specific schema. The Map<Column, Type> object is an array of key-value pairs representing the schema this transform accepts, where Column is the name of a column, and Type is the type of the data such as an integer or string. Has an upper bound of 100 columns.\n\n(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.\n\nName (string) --The name of the column.\n\nDataType (string) --The type of data in the column.\n\n\n\n\n\n\n
:type Sort: dict
:param Sort: The sorting criteria.\n\nColumn (string) -- [REQUIRED]The column to be used in the sorting criteria that are associated with the machine learning transform.\n\nSortDirection (string) -- [REQUIRED]The sort direction to be used in the sorting criteria that are associated with the machine learning transform.\n\n\n
:rtype: dict
Returns: Response Syntax
{
'Transforms': [
{
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Transforms (list) --
A list of machine learning transforms.
(dict) --
A structure for a machine learning transform.
TransformId (string) --
The unique transform ID that is generated for the machine learning transform. The ID is guaranteed to be unique and does not change.
Name (string) --
A user-defined name for the machine learning transform. Names are not guaranteed unique and can be changed at any time.
Description (string) --
A user-defined, long-form description text for the machine learning transform. Descriptions are not guaranteed to be unique and can be changed at any time.
Status (string) --
The current status of the machine learning transform.
CreatedOn (datetime) --
A timestamp. The time and date that this machine learning transform was created.
LastModifiedOn (datetime) --
A timestamp. The last point in time when this machine learning transform was modified.
InputRecordTables (list) --
A list of AWS Glue table definitions used by the transform.
(dict) --
The database and table in the AWS Glue Data Catalog that is used for input or output data.
DatabaseName (string) --
A database name in the AWS Glue Data Catalog.
TableName (string) --
A table name in the AWS Glue Data Catalog.
CatalogId (string) --
A unique identifier for the AWS Glue Data Catalog.
ConnectionName (string) --
The name of the connection to the AWS Glue Data Catalog.
Parameters (dict) --
A TransformParameters object. You can use parameters to tune (customize) the behavior of the machine learning transform by specifying what data it learns from and your preference on various tradeoffs (such as precision vs. recall, or accuracy vs. cost).
TransformType (string) --
The type of machine learning transform.
For information about the types of machine learning transforms, see Creating Machine Learning Transforms .
FindMatchesParameters (dict) --
The parameters for the find matches algorithm.
PrimaryKeyColumnName (string) --
The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.
PrecisionRecallTradeoff (float) --
The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.
The precision metric indicates how often your model is correct when it predicts a match.
The recall metric indicates, for an actual match, how often your model predicts the match.
AccuracyCostTradeoff (float) --
The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.
Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.
Cost measures how many compute resources, and thus money, are consumed to run the transform.
EnforceProvidedLabels (boolean) --
The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.
Note that setting this value to true may increase the conflation execution time.
EvaluationMetrics (dict) --
An EvaluationMetrics object. Evaluation metrics provide an estimate of the quality of your machine learning transform.
TransformType (string) --
The type of machine learning transform.
FindMatchesMetrics (dict) --
The evaluation metrics for the find matches algorithm.
AreaUnderPRCurve (float) --
The area under the precision/recall curve (AUPRC) is a single number measuring the overall quality of the transform, independent of the choice made for precision vs. recall. Higher values indicate that you have a more attractive precision vs. recall tradeoff.
For more information, see Precision and recall in Wikipedia.
Precision (float) --
The precision metric indicates how often your transform is correct when it predicts a match. Specifically, it measures how well the transform finds true positives from the total true positives possible.
For more information, see Precision and recall in Wikipedia.
Recall (float) --
The recall metric indicates, for an actual match, how often your transform predicts the match. Specifically, it measures how well the transform finds true positives from the total records in the source data.
For more information, see Precision and recall in Wikipedia.
F1 (float) --
The maximum F1 metric indicates the transform\'s accuracy between 0 and 1, where 1 is the best accuracy.
For more information, see F1 score in Wikipedia.
ConfusionMatrix (dict) --
The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
For more information, see Confusion matrix in Wikipedia.
NumTruePositives (integer) --
The number of matches in the data that the transform correctly found, in the confusion matrix for your transform.
NumFalsePositives (integer) --
The number of nonmatches in the data that the transform incorrectly classified as a match, in the confusion matrix for your transform.
NumTrueNegatives (integer) --
The number of nonmatches in the data that the transform correctly rejected, in the confusion matrix for your transform.
NumFalseNegatives (integer) --
The number of matches in the data that the transform didn\'t find, in the confusion matrix for your transform.
LabelCount (integer) --
A count identifier for the labeling files generated by AWS Glue for this transform. As you create a better transform, you can iteratively download, label, and upload the labeling file.
Schema (list) --
A map of key-value pairs representing the columns and data types that this transform can run against. Has an upper bound of 100 columns.
(dict) --
A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.
Name (string) --
The name of the column.
DataType (string) --
The type of data in the column.
Role (string) --
The name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both AWS Glue service role permissions to AWS Glue resources, and Amazon S3 permissions required by the transform.
This role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .
This role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.
GlueVersion (string) --
This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType.
If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.
If MaxCapacity is set, then neither NumberOfWorkers nor WorkerType can be set.
If WorkerType is set, then NumberOfWorkers is required (and vice versa).
MaxCapacity and NumberOfWorkers must both be at least 1.
When the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.
WorkerType (string) --
The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType.
If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set.
If MaxCapacity is set, then neither NumberOfWorkers nor WorkerType can be set.
If WorkerType is set, then NumberOfWorkers is required (and vice versa).
MaxCapacity and NumberOfWorkers must both be at least 1.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a task of the transform runs.
If WorkerType is set, then NumberOfWorkers is required (and vice versa).
Timeout (integer) --
The timeout in minutes of the machine learning transform.
MaxRetries (integer) --
The maximum number of times to retry after an MLTaskRun of the machine learning transform fails.
NextToken (string) --
A pagination token, if more results are available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'Transforms': [
{
'TransformId': 'string',
'Name': 'string',
'Description': 'string',
'Status': 'NOT_READY'|'READY'|'DELETING',
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'InputRecordTables': [
{
'DatabaseName': 'string',
'TableName': 'string',
'CatalogId': 'string',
'ConnectionName': 'string'
},
],
'Parameters': {
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
'EvaluationMetrics': {
'TransformType': 'FIND_MATCHES',
'FindMatchesMetrics': {
'AreaUnderPRCurve': 123.0,
'Precision': 123.0,
'Recall': 123.0,
'F1': 123.0,
'ConfusionMatrix': {
'NumTruePositives': 123,
'NumFalsePositives': 123,
'NumTrueNegatives': 123,
'NumFalseNegatives': 123
}
}
},
'LabelCount': 123,
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
],
'Role': 'string',
'GlueVersion': 'string',
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'Timeout': 123,
'MaxRetries': 123
},
],
'NextToken': 'string'
}
:returns:
This role needs AWS Glue service role permissions to allow access to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue .
This role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.
"""
pass
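# Illustrative usage sketch (not part of the generated stubs): listing READY
# FindMatches transforms, newest first, and following NextToken by hand.
# AWS credentials and region are assumed to be configured.
def _example_get_ml_transforms():
    import boto3
    glue = boto3.client('glue')
    kwargs = {
        'Filter': {'TransformType': 'FIND_MATCHES', 'Status': 'READY'},
        'Sort': {'Column': 'CREATED', 'SortDirection': 'DESCENDING'},
    }
    names = []
    while True:
        response = glue.get_ml_transforms(**kwargs)
        names.extend(t['Name'] for t in response['Transforms'])
        if 'NextToken' not in response:
            break
        kwargs['NextToken'] = response['NextToken']
    return names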
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
Returns: A paginator object.
"""
pass
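# Illustrative usage sketch (not part of the generated stubs): using
# get_paginator instead of tracking NextToken manually. This assumes the
# 'get_partitions' operation is paginatable in the installed botocore
# version; database and table names are hypothetical.
def _example_get_paginator():
    import boto3
    glue = boto3.client('glue')
    paginator = glue.get_paginator('get_partitions')
    total = 0
    for page in paginator.paginate(DatabaseName='sales_db', TableName='orders'):
        total += len(page['Partitions'])
    return total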
def get_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValues=None):
"""
Retrieves information about a specified partition.
See also: AWS API Documentation
Exceptions
:example: response = client.get_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionValues=[
'string',
]
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition in question resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the partition resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the partition\'s table.\n
:type PartitionValues: list
:param PartitionValues: [REQUIRED]\nThe values that define the partition.\n\n(string) --\n\n
:rtype: dict
Returns: Response Syntax
{
'Partition': {
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
}
Response Structure
(dict) --
Partition (dict) --
The requested information, in the form of a Partition object.
Values (list) --
The values of the partition.
(string) --
DatabaseName (string) --
The name of the catalog database in which to create the partition.
TableName (string) --
The name of the database table in which to create the partition.
CreationTime (datetime) --
The time at which the partition was created.
LastAccessTime (datetime) --
The last time at which the partition was accessed.
StorageDescriptor (dict) --
Provides information about the physical location where the partition is stored.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1) or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
Parameters (dict) --
These key-value pairs define partition parameters.
(string) --
(string) --
LastAnalyzedTime (datetime) --
The last time at which column statistics were computed for this partition.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Partition': {
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
}
:returns:
(string) --
"""
pass
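# Illustrative usage sketch (not part of the generated stubs): fetching one
# partition and reading its storage location. Database, table, and partition
# values are hypothetical; PartitionValues must match the table's partition
# keys in order.
def _example_get_partition():
    import boto3
    glue = boto3.client('glue')
    response = glue.get_partition(
        DatabaseName='sales_db',
        TableName='orders',
        PartitionValues=['2020', '01'],
    )
    return response['Partition']['StorageDescriptor']['Location']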
def get_partitions(CatalogId=None, DatabaseName=None, TableName=None, Expression=None, NextToken=None, Segment=None, MaxResults=None):
"""
Retrieves information about the partitions in a table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_partitions(
CatalogId='string',
DatabaseName='string',
TableName='string',
Expression='string',
NextToken='string',
Segment={
'SegmentNumber': 123,
'TotalSegments': 123
},
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partitions in question reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the partitions reside.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the partitions\' table.\n
:type Expression: string
:param Expression: An expression that filters the partitions to be returned.\nThe expression uses SQL syntax similar to the SQL WHERE filter clause. The SQL statement parser JSQLParser parses the expression.\n\nOperators : The following are the operators that you can use in the Expression API call:\n=\n\nChecks whether the values of the two operands are equal; if yes, then the condition becomes true.\nExample: Assume \'variable a\' holds 10 and \'variable b\' holds 20.\n(a = b) is not true.\n\n< >\nChecks whether the values of two operands are equal; if the values are not equal, then the condition becomes true.\nExample: (a < > b) is true.\n\n>\nChecks whether the value of the left operand is greater than the value of the right operand; if yes, then the condition becomes true.\nExample: (a > b) is not true.\n\n<\nChecks whether the value of the left operand is less than the value of the right operand; if yes, then the condition becomes true.\nExample: (a < b) is true.\n\n>=\nChecks whether the value of the left operand is greater than or equal to the value of the right operand; if yes, then the condition becomes true.\nExample: (a >= b) is not true.\n\n<=\nChecks whether the value of the left operand is less than or equal to the value of the right operand; if yes, then the condition becomes true.\nExample: (a <= b) is true.\n\nAND, OR, IN, BETWEEN, LIKE, NOT, IS NULL\nLogical operators.\n\nSupported Partition Key Types : The following are the supported partition keys.\n\nstring\ndate\ntimestamp\nint\nbigint\nlong\ntinyint\nsmallint\ndecimal\n\nIf an invalid type is encountered, an exception is thrown.\nThe following list shows the valid operators on each type. When you define a crawler, the partitionKey type is created as a STRING , to be compatible with the catalog partitions.\n\nSample API Call :\n
:type NextToken: string
:param NextToken: A continuation token, if this is not the first call to retrieve these partitions.
:type Segment: dict
:param Segment: The segment of the table\'s partitions to scan in this request.\n\nSegmentNumber (integer) -- [REQUIRED]The zero-based index number of the segment. For example, if the total number of segments is 4, SegmentNumber values range from 0 through 3.\n\nTotalSegments (integer) -- [REQUIRED]The total number of segments.\n\n\n
:type MaxResults: integer
:param MaxResults: The maximum number of partitions to return in a single response.
:rtype: dict
Returns: Response Syntax
{
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Partitions (list) --
A list of requested partitions.
(dict) --
Represents a slice of table data.
Values (list) --
The values of the partition.
(string) --
DatabaseName (string) --
The name of the catalog database in which to create the partition.
TableName (string) --
The name of the database table in which to create the partition.
CreationTime (datetime) --
The time at which the partition was created.
LastAccessTime (datetime) --
The last time at which the partition was accessed.
StorageDescriptor (dict) --
Provides information about the physical location where the partition is stored.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1) or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
Parameters (dict) --
These key-value pairs define partition parameters.
(string) --
(string) --
LastAnalyzedTime (datetime) --
The last time at which column statistics were computed for this partition.
NextToken (string) --
A continuation token, if the returned list of partitions does not include the last one.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Partitions': [
{
'Values': [
'string',
],
'DatabaseName': 'string',
'TableName': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
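# Illustrative usage sketch (not part of the generated stubs): filtering
# partitions with a SQL-like Expression and following NextToken. The
# database, table, and partition key 'year' are hypothetical.
def _example_get_partitions():
    import boto3
    glue = boto3.client('glue')
    kwargs = {
        'DatabaseName': 'sales_db',
        'TableName': 'orders',
        'Expression': "year = '2020'",
    }
    locations = []
    while True:
        response = glue.get_partitions(**kwargs)
        locations.extend(
            p['StorageDescriptor']['Location'] for p in response['Partitions']
        )
        if 'NextToken' not in response:
            break
        kwargs['NextToken'] = response['NextToken']
    return locations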
def get_plan(Mapping=None, Source=None, Sinks=None, Location=None, Language=None):
"""
Gets code to perform a specified mapping.
See also: AWS API Documentation
Exceptions
:example: response = client.get_plan(
Mapping=[
{
'SourceTable': 'string',
'SourcePath': 'string',
'SourceType': 'string',
'TargetTable': 'string',
'TargetPath': 'string',
'TargetType': 'string'
},
],
Source={
'DatabaseName': 'string',
'TableName': 'string'
},
Sinks=[
{
'DatabaseName': 'string',
'TableName': 'string'
},
],
Location={
'Jdbc': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'S3': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
],
'DynamoDB': [
{
'Name': 'string',
'Value': 'string',
'Param': True|False
},
]
},
Language='PYTHON'|'SCALA'
)
:type Mapping: list
:param Mapping: [REQUIRED]\nThe list of mappings from a source table to target tables.\n\n(dict) --Defines a mapping.\n\nSourceTable (string) --The name of the source table.\n\nSourcePath (string) --The source path.\n\nSourceType (string) --The source type.\n\nTargetTable (string) --The target table.\n\nTargetPath (string) --The target path.\n\nTargetType (string) --The target type.\n\n\n\n\n
:type Source: dict
:param Source: [REQUIRED]\nThe source table.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n
:type Sinks: list
:param Sinks: The target tables.\n\n(dict) --Specifies a table definition in the AWS Glue Data Catalog.\n\nDatabaseName (string) -- [REQUIRED]The database in which the table metadata resides.\n\nTableName (string) -- [REQUIRED]The name of the table in question.\n\n\n\n\n
:type Location: dict
:param Location: The parameters for the mapping.\n\nJdbc (list) --A JDBC location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nS3 (list) --An Amazon Simple Storage Service (Amazon S3) location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\nDynamoDB (list) --An Amazon DynamoDB table location.\n\n(dict) --An argument or property of a node.\n\nName (string) -- [REQUIRED]The name of the argument or property.\n\nValue (string) -- [REQUIRED]The value of the argument or property.\n\nParam (boolean) --True if the value is used as a parameter.\n\n\n\n\n\n\n
:type Language: string
:param Language: The programming language of the code to perform the mapping.
:rtype: dict
Returns: Response Syntax
{
'PythonScript': 'string',
'ScalaCode': 'string'
}
Response Structure
(dict) --
PythonScript (string) --
A Python script to perform the mapping.
ScalaCode (string) --
The Scala code to perform the mapping.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'PythonScript': 'string',
'ScalaCode': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
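# Illustrative usage sketch (not part of the generated stubs): generating a
# Python ETL script for a one-to-one column mapping. All database, table,
# and column names are hypothetical.
def _example_get_plan():
    import boto3
    glue = boto3.client('glue')
    response = glue.get_plan(
        Mapping=[{
            'SourceTable': 'orders',
            'SourcePath': 'order_id',
            'SourceType': 'string',
            'TargetTable': 'orders_clean',
            'TargetPath': 'order_id',
            'TargetType': 'string',
        }],
        Source={'DatabaseName': 'sales_db', 'TableName': 'orders'},
        Sinks=[{'DatabaseName': 'sales_db', 'TableName': 'orders_clean'}],
        Language='PYTHON',
    )
    return response['PythonScript']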
def get_resource_policy():
"""
Retrieves a specified resource policy.
See also: AWS API Documentation
Exceptions
:example: response = client.get_resource_policy()
:rtype: dict
Returns: Response Syntax
{
'PolicyInJson': 'string',
'PolicyHash': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1)
}
Response Structure
(dict) --
PolicyInJson (string) --Contains the requested policy document, in JSON format.
PolicyHash (string) --Contains the hash value associated with this policy.
CreateTime (datetime) --The date and time at which the policy was created.
UpdateTime (datetime) --The date and time at which the policy was last updated.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
:return: {
'PolicyInJson': 'string',
'PolicyHash': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1)
}
"""
pass
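# Illustrative usage sketch (not part of the generated stubs): reading the
# account's resource policy and decoding the JSON document. Note that
# EntityNotFoundException is raised when no policy has been set.
def _example_get_resource_policy():
    import json
    import boto3
    glue = boto3.client('glue')
    response = glue.get_resource_policy()
    policy = json.loads(response['PolicyInJson'])
    return policy.get('Statement', []), response['PolicyHash']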
def get_security_configuration(Name=None):
"""
Retrieves a specified security configuration.
See also: AWS API Documentation
Exceptions
:example: response = client.get_security_configuration(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the security configuration to retrieve.\n
:rtype: dict
Returns: Response Syntax
{
'SecurityConfiguration': {
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
}
}
Response Structure
(dict) --
SecurityConfiguration (dict) --The requested security configuration.
Name (string) --The name of the security configuration.
CreatedTimeStamp (datetime) --The time at which this security configuration was created.
EncryptionConfiguration (dict) --The encryption configuration associated with this security configuration.
S3Encryption (list) --The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.
(dict) --Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.
S3EncryptionMode (string) --The encryption mode to use for Amazon S3 data.
KmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
CloudWatchEncryption (dict) --The encryption configuration for Amazon CloudWatch.
CloudWatchEncryptionMode (string) --The encryption mode to use for CloudWatch data.
KmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
JobBookmarksEncryption (dict) --The encryption configuration for job bookmarks.
JobBookmarksEncryptionMode (string) --The encryption mode to use for job bookmarks data.
KmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'SecurityConfiguration': {
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
}
}
"""
pass
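# Illustrative usage sketch (not part of the generated stubs): inspecting the
# S3 encryption modes of a named security configuration. The configuration
# name is hypothetical.
def _example_get_security_configuration():
    import boto3
    glue = boto3.client('glue')
    response = glue.get_security_configuration(Name='my-security-config')
    encryption = response['SecurityConfiguration']['EncryptionConfiguration']
    return [s3['S3EncryptionMode'] for s3 in encryption.get('S3Encryption', [])]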
def get_security_configurations(MaxResults=None, NextToken=None):
"""
Retrieves a list of all security configurations.
See also: AWS API Documentation
Exceptions
:example: response = client.get_security_configurations(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The maximum number of results to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:rtype: dict
Returns: Response Syntax
{
'SecurityConfigurations': [
{
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
SecurityConfigurations (list) --
A list of security configurations.
(dict) --
Specifies a security configuration.
Name (string) --
The name of the security configuration.
CreatedTimeStamp (datetime) --
The time at which this security configuration was created.
EncryptionConfiguration (dict) --
The encryption configuration associated with this security configuration.
S3Encryption (list) --
The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.
(dict) --
Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.
S3EncryptionMode (string) --
The encryption mode to use for Amazon S3 data.
KmsKeyArn (string) --
The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
CloudWatchEncryption (dict) --
The encryption configuration for Amazon CloudWatch.
CloudWatchEncryptionMode (string) --
The encryption mode to use for CloudWatch data.
KmsKeyArn (string) --
The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
JobBookmarksEncryption (dict) --
The encryption configuration for job bookmarks.
JobBookmarksEncryptionMode (string) --
The encryption mode to use for job bookmarks data.
KmsKeyArn (string) --
The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
NextToken (string) --
A continuation token, if there are more security configurations to return.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'SecurityConfigurations': [
{
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
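# Illustrative usage sketch (not part of the generated stubs): collecting all
# security configuration names by following NextToken manually.
def _example_get_security_configurations():
    import boto3
    glue = boto3.client('glue')
    names, kwargs = [], {}
    while True:
        response = glue.get_security_configurations(**kwargs)
        names.extend(c['Name'] for c in response['SecurityConfigurations'])
        if 'NextToken' not in response:
            break
        kwargs['NextToken'] = response['NextToken']
    return names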
def get_table(CatalogId=None, DatabaseName=None, Name=None):
"""
Retrieves the Table definition in a Data Catalog for a specified table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_table(
CatalogId='string',
DatabaseName='string',
Name='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type Name: string
:param Name: [REQUIRED]\nThe name of the table for which to retrieve the definition. For Hive compatibility, this name is entirely lowercase.\n
:rtype: dict
Returns: Response Syntax
{
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
}
}
Response Structure
(dict) --
Table (dict) --
The Table object that defines the specified table.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1) or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
}
}
:returns:
(string) --
(string) --
"""
pass
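# Illustrative usage sketch (not part of the generated stubs): retrieving a
# table definition and listing its column names and types. Database and
# table names are hypothetical.
def _example_get_table():
    import boto3
    glue = boto3.client('glue')
    response = glue.get_table(DatabaseName='sales_db', Name='orders')
    columns = response['Table']['StorageDescriptor']['Columns']
    return [(c['Name'], c['Type']) for c in columns]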
def get_table_version(CatalogId=None, DatabaseName=None, TableName=None, VersionId=None):
"""
Retrieves a specified version of a table.
See also: AWS API Documentation
Exceptions
:example: response = client.get_table_version(
CatalogId='string',
DatabaseName='string',
TableName='string',
VersionId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type VersionId: string
:param VersionId: The ID value of the table version to be retrieved. A VersionID is a string representation of an integer. Each version is incremented by 1.
:rtype: dict
Returns: Response Syntax
{
'TableVersion': {
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
}
}
Response Structure
(dict) --
TableVersion (dict) --
The requested table version.
Table (dict) --
The table in question.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1) or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
VersionId (string) --
The ID value that identifies this table version. A VersionId is a string representation of an integer. Each version is incremented by 1.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'TableVersion': {
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
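# A minimal usage sketch (illustrative, not part of the generated stub): reading the
# column names of one table version via a real boto3 Glue client. The database, table,
# and version values are assumptions for the example.
def _example_table_version_columns(database_name='my_db', table_name='my_table', version_id='1'):
    import boto3
    glue = boto3.client('glue')
    resp = glue.get_table_version(
        DatabaseName=database_name, TableName=table_name, VersionId=version_id
    )
    table = resp['TableVersion']['Table']
    # Columns live under the table's StorageDescriptor, as documented above.
    storage = table.get('StorageDescriptor', {})
    return [col['Name'] for col in storage.get('Columns', [])]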
def get_table_versions(CatalogId=None, DatabaseName=None, TableName=None, NextToken=None, MaxResults=None):
"""
Retrieves a list of the available versions of a specified table, each including the table definition and its version ID.
See also: AWS API Documentation
:example: response = client.get_table_versions(
CatalogId='string',
DatabaseName='string',
TableName='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table. For Hive compatibility, this name is entirely lowercase.\n
:type NextToken: string
:param NextToken: A continuation token, if this is not the first call.
:type MaxResults: integer
:param MaxResults: The maximum number of table versions to return in one response.
:rtype: dict
Response Syntax
{
'TableVersions': [
{
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
TableVersions (list) --
A list of the available versions of the specified table.
(dict) --
Specifies a version of a table.
Table (dict) --
The table in question.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1 ), or in descending order (== 0 ).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
VersionId (string) --
The ID value that identifies this table version. A VersionId is a string representation of an integer. Each version is incremented by 1.
NextToken (string) --
A continuation token, if the list of available versions does not include the last one.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'TableVersions': [
{
'Table': {
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
'VersionId': 'string'
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
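# A minimal pagination sketch (illustrative, not part of the generated stub): walking
# every version of a table with NextToken, as described in the docstring above.
# Database and table names are assumptions.
def _example_all_table_versions(database_name='my_db', table_name='my_table'):
    import boto3
    glue = boto3.client('glue')
    versions = []
    kwargs = {'DatabaseName': database_name, 'TableName': table_name, 'MaxResults': 100}
    while True:
        resp = glue.get_table_versions(**kwargs)
        versions.extend(resp.get('TableVersions', []))
        token = resp.get('NextToken')
        if not token:
            return versions
        kwargs['NextToken'] = token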
def get_tables(CatalogId=None, DatabaseName=None, Expression=None, NextToken=None, MaxResults=None):
"""
Retrieves the definitions of some or all of the tables in a given Database .
See also: AWS API Documentation
:example: response = client.get_tables(
CatalogId='string',
DatabaseName='string',
Expression='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the tables reside. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe database in the catalog whose tables to list. For Hive compatibility, this name is entirely lowercase.\n
:type Expression: string
:param Expression: A regular expression pattern. If present, only those tables whose names match the pattern are returned.
:type NextToken: string
:param NextToken: A continuation token, included if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of tables to return in a single response.
:rtype: dict
Response Syntax
{
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
TableList (list) --
A list of the requested Table objects.
(dict) --
Represents a collection of related data organized in columns and rows.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1 ), or in descending order (== 0 ).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
NextToken (string) --
A continuation token, present if the current list segment is not the last.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
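# A minimal usage sketch (illustrative, not part of the generated stub): listing the
# names of all tables in a database whose names match a regular-expression pattern,
# following NextToken as documented above. The database name and pattern are assumptions.
def _example_matching_table_names(database_name='my_db', expression='sales_.*'):
    import boto3
    glue = boto3.client('glue')
    names, token = [], None
    while True:
        kwargs = {'DatabaseName': database_name, 'Expression': expression}
        if token:
            kwargs['NextToken'] = token
        resp = glue.get_tables(**kwargs)
        names.extend(t['Name'] for t in resp.get('TableList', []))
        token = resp.get('NextToken')
        if not token:
            return names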
def get_tags(ResourceArn=None):
"""
Retrieves a list of tags associated with a resource.
See also: AWS API Documentation
:example: response = client.get_tags(
ResourceArn='string'
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource for which to retrieve tags.\n
:rtype: dict
Response Syntax
{
'Tags': {
'string': 'string'
}
}
Response Structure
(dict) --
Tags (dict) --The requested tags.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {
'Tags': {
'string': 'string'
}
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
"""
pass
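# A minimal usage sketch (illustrative, not part of the generated stub): fetching the
# tags attached to a Glue resource by ARN. The ARN shown is a placeholder.
def _example_resource_tags(resource_arn='arn:aws:glue:us-east-1:123456789012:job/my-job'):
    import boto3
    glue = boto3.client('glue')
    # Returns the key-value tag map documented above (empty dict if untagged).
    return glue.get_tags(ResourceArn=resource_arn).get('Tags', {})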
def get_trigger(Name=None):
"""
Retrieves the definition of a trigger.
See also: AWS API Documentation
:example: response = client.get_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to retrieve.\n
:rtype: dict
Response Syntax
{
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
Response Structure
(dict) --
Trigger (dict) --The requested trigger definition.
Name (string) --The name of the trigger.
WorkflowName (string) --The name of the workflow associated with the trigger.
Id (string) --Reserved for future use.
Type (string) --The type of trigger that this is.
State (string) --The current state of the trigger.
Description (string) --A description of this trigger.
Schedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --The actions initiated by this trigger.
(dict) --Defines an action to be initiated by a trigger.
JobName (string) --The name of a job to be executed.
Arguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --The name of the crawler to be used with this action.
Predicate (dict) --The predicate of this trigger, which defines when it will fire.
Logical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --A list of the conditions that determine when the trigger will fire.
(dict) --Defines a condition under which a trigger fires.
LogicalOperator (string) --A logical operator.
JobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --The name of the crawler to which this condition applies.
CrawlState (string) --The state of the crawler to which this condition applies.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
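# A minimal usage sketch (illustrative, not part of the generated stub): inspecting a
# trigger's type, schedule, and the jobs it starts, using the response shape documented
# above. The trigger name is an assumption.
def _example_describe_trigger(trigger_name='my-trigger'):
    import boto3
    glue = boto3.client('glue')
    trigger = glue.get_trigger(Name=trigger_name)['Trigger']
    job_names = [a.get('JobName') for a in trigger.get('Actions', [])]
    return {
        'type': trigger.get('Type'),          # SCHEDULED | CONDITIONAL | ON_DEMAND
        'schedule': trigger.get('Schedule'),  # cron expression for SCHEDULED triggers
        'starts_jobs': [name for name in job_names if name],
    }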
def get_triggers(NextToken=None, DependentJobName=None, MaxResults=None):
"""
Gets all the triggers associated with a job.
See also: AWS API Documentation
:example: response = client.get_triggers(
NextToken='string',
DependentJobName='string',
MaxResults=123
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type DependentJobName: string
:param DependentJobName: The name of the job to retrieve triggers for. The trigger that can start this job is returned, and if there is no such trigger, all triggers are returned.
:type MaxResults: integer
:param MaxResults: The maximum size of the response.
:rtype: dict
Response Syntax
{
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Triggers (list) --
A list of triggers for the specified job.
(dict) --
Information about a specific trigger.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
NextToken (string) --
A continuation token, if not all the requested triggers have yet been returned.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Triggers': [
{
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
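# A minimal pagination sketch (illustrative, not part of the generated stub): collecting
# every trigger that can start a given job, following NextToken as documented above.
# The job name is an assumption.
def _example_triggers_for_job(job_name='my-job'):
    import boto3
    glue = boto3.client('glue')
    triggers, token = [], None
    while True:
        kwargs = {'DependentJobName': job_name}
        if token:
            kwargs['NextToken'] = token
        resp = glue.get_triggers(**kwargs)
        triggers.extend(resp.get('Triggers', []))
        token = resp.get('NextToken')
        if not token:
            return triggers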
def get_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None):
"""
Retrieves a specified function definition from the Data Catalog.
See also: AWS API Documentation
:example: response = client.get_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionName='string'
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the function to be retrieved is located. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the function is located.\n
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the function.\n
:rtype: dict
Response Syntax
{
'UserDefinedFunction': {
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
}
Response Structure
(dict) --
UserDefinedFunction (dict) --
The requested function definition.
FunctionName (string) --
The name of the function.
ClassName (string) --
The Java class that contains the function code.
OwnerName (string) --
The owner of the function.
OwnerType (string) --
The owner type.
CreateTime (datetime) --
The time at which the function was created.
ResourceUris (list) --
The resource URIs for the function.
(dict) --
The URIs for function resources.
ResourceType (string) --
The type of the resource.
Uri (string) --
The URI for accessing the resource.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'UserDefinedFunction': {
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
"""
pass
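# A minimal usage sketch (illustrative, not part of the generated stub): looking up the
# implementing Java class of a Hive UDF registered in the Data Catalog. Names are
# assumptions.
def _example_udf_class(database_name='my_db', function_name='my_udf'):
    import boto3
    glue = boto3.client('glue')
    udf = glue.get_user_defined_function(
        DatabaseName=database_name, FunctionName=function_name
    )['UserDefinedFunction']
    return udf.get('ClassName')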
def get_user_defined_functions(CatalogId=None, DatabaseName=None, Pattern=None, NextToken=None, MaxResults=None):
"""
Retrieves multiple function definitions from the Data Catalog.
See also: AWS API Documentation
:example: response = client.get_user_defined_functions(
CatalogId='string',
DatabaseName='string',
Pattern='string',
NextToken='string',
MaxResults=123
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the functions to be retrieved are located. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: The name of the catalog database where the functions are located.
:type Pattern: string
:param Pattern: [REQUIRED]\nA function-name pattern string that filters the function definitions returned.\n
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation call.
:type MaxResults: integer
:param MaxResults: The maximum number of functions to return in one response.
:rtype: dict
Response Syntax
{
'UserDefinedFunctions': [
{
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
UserDefinedFunctions (list) --
A list of requested function definitions.
(dict) --
Represents the equivalent of a Hive user-defined function (UDF ) definition.
FunctionName (string) --
The name of the function.
ClassName (string) --
The Java class that contains the function code.
OwnerName (string) --
The owner of the function.
OwnerType (string) --
The owner type.
CreateTime (datetime) --
The time at which the function was created.
ResourceUris (list) --
The resource URIs for the function.
(dict) --
The URIs for function resources.
ResourceType (string) --
The type of the resource.
Uri (string) --
The URI for accessing the resource.
NextToken (string) --
A continuation token, if the list of functions returned does not include the last requested function.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
:return: {
'UserDefinedFunctions': [
{
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'CreateTime': datetime(2015, 1, 1),
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
},
],
'NextToken': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.GlueEncryptionException
"""
pass
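# A minimal pagination sketch (illustrative, not part of the generated stub): listing all
# UDF names in a database that match a pattern, following NextToken as documented above.
# The database name and pattern are assumptions.
def _example_udf_names(database_name='my_db', pattern='*'):
    import boto3
    glue = boto3.client('glue')
    names, token = [], None
    while True:
        kwargs = {'DatabaseName': database_name, 'Pattern': pattern}
        if token:
            kwargs['NextToken'] = token
        resp = glue.get_user_defined_functions(**kwargs)
        names.extend(f['FunctionName'] for f in resp.get('UserDefinedFunctions', []))
        token = resp.get('NextToken')
        if not token:
            return names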
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
"""
pass
def get_workflow(Name=None, IncludeGraph=None):
"""
Retrieves resource metadata for a workflow.
See also: AWS API Documentation
:example: response = client.get_workflow(
Name='string',
IncludeGraph=True|False
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the workflow to retrieve.\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include a graph when returning the workflow resource metadata.
:rtype: dict
Response Syntax
{
'Workflow': {
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
Response Structure
(dict) --
Workflow (dict) --
The resource metadata for the workflow.
Name (string) --
The name of the workflow representing the flow.
Description (string) --
A description of the workflow.
DefaultRunProperties (dict) --
A collection of properties to be used as part of each execution of the workflow.
(string) --
(string) --
CreatedOn (datetime) --
The date and time when the workflow was created.
LastModifiedOn (datetime) --
The date and time when the workflow was last modified.
LastRun (dict) --
The information about the last execution of the workflow.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component (such as a trigger, job, or crawler) that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X , and 149 for G.2X .
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component (such as a trigger, job, or crawler) that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers ). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Workflow': {
'Name': 'string',
'Description': 'string',
'DefaultRunProperties': {
'string': 'string'
},
'CreatedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'LastRun': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
:returns:
(string) --
(string) --
"""
pass
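# Illustrative usage sketch, not part of the generated stubs: it resolves the Edges returned by
# get_workflow(IncludeGraph=True) into readable "source -> destination" pairs via the Nodes'
# UniqueId values, following the response syntax documented above. Assumes boto3 is installed
# and AWS credentials/region are configured; the helper name is hypothetical.
def _example_walk_workflow_graph(workflow_name):
    import boto3
    client = boto3.client('glue')
    graph = client.get_workflow(Name=workflow_name, IncludeGraph=True)['Workflow']['Graph']
    # Index nodes by UniqueId so edge endpoints can be mapped back to component names.
    nodes = {node['UniqueId']: node for node in graph.get('Nodes', [])}
    adjacency = {}
    for edge in graph.get('Edges', []):
        adjacency.setdefault(edge['SourceId'], []).append(edge['DestinationId'])
    for source_id, destination_ids in adjacency.items():
        source = nodes[source_id]
        for destination_id in destination_ids:
            destination = nodes[destination_id]
            print('%s %s -> %s %s' % (source['Type'], source['Name'],
                                      destination['Type'], destination['Name']))
    return adjacency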
def get_workflow_run(Name=None, RunId=None, IncludeGraph=None):
"""
Retrieves the metadata for a given workflow run.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow_run(
Name='string',
RunId='string',
IncludeGraph=True|False
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow being run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run.\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include the workflow graph in response or not.
:rtype: dict
ReturnsResponse Syntax
{
'Run': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
Response Structure
(dict) --
Run (dict) --
The requested workflow run metadata.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a Trigger or Job, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Run': {
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
}
}
:returns:
(string) --
(string) --
"""
pass
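# Illustrative usage sketch, not part of the generated stubs: summarizes a single workflow run
# via get_workflow_run, reading the Status and Statistics keys documented above. Assumes boto3
# is installed and credentials are configured; the helper name is hypothetical.
def _example_summarize_workflow_run(workflow_name, run_id):
    import boto3
    client = boto3.client('glue')
    run = client.get_workflow_run(Name=workflow_name, RunId=run_id, IncludeGraph=False)['Run']
    stats = run.get('Statistics', {})
    print('Run %s is %s: %d of %d actions succeeded, %d failed' % (
        run['WorkflowRunId'], run['Status'],
        stats.get('SucceededActions', 0), stats.get('TotalActions', 0),
        stats.get('FailedActions', 0)))
    return run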
def get_workflow_run_properties(Name=None, RunId=None):
"""
Retrieves the workflow run properties which were set during the run.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow_run_properties(
Name='string',
RunId='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow which was run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run whose run properties should be returned.\n
:rtype: dict
ReturnsResponse Syntax
{
'RunProperties': {
'string': 'string'
}
}
Response Structure
(dict) --
RunProperties (dict) --
The workflow run properties which were set during the specified run.
(string) --
(string) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'RunProperties': {
'string': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
def get_workflow_runs(Name=None, IncludeGraph=None, NextToken=None, MaxResults=None):
"""
Retrieves metadata for all runs of a given workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.get_workflow_runs(
Name='string',
IncludeGraph=True|False,
NextToken='string',
MaxResults=123
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow whose metadata of runs should be returned.\n
:type IncludeGraph: boolean
:param IncludeGraph: Specifies whether to include the workflow graph in response or not.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum number of workflow runs to be included in the response.
:rtype: dict
ReturnsResponse Syntax
{
'Runs': [
{
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Runs (list) --
A list of workflow run metadata objects.
(dict) --
A workflow run is an execution of a workflow providing all the runtime information.
Name (string) --
Name of the workflow which was executed.
WorkflowRunId (string) --
The ID of this workflow run.
WorkflowRunProperties (dict) --
The workflow run properties which were set during the run.
(string) --
(string) --
StartedOn (datetime) --
The date and time when the workflow run was started.
CompletedOn (datetime) --
The date and time when the workflow run completed.
Status (string) --
The status of the workflow run.
Statistics (dict) --
The statistics of the run.
TotalActions (integer) --
Total number of Actions in the workflow run.
TimeoutActions (integer) --
Total number of Actions which timed out.
FailedActions (integer) --
Total number of Actions which have failed.
StoppedActions (integer) --
Total number of Actions which have stopped.
SucceededActions (integer) --
Total number of Actions which have succeeded.
RunningActions (integer) --
Total number of Actions in running state.
Graph (dict) --
The graph representing all the AWS Glue components that belong to the workflow as nodes and directed connections between them as edges.
Nodes (list) --
A list of the AWS Glue components that belong to the workflow, represented as nodes.
(dict) --
A node represents an AWS Glue component, such as a Trigger or Job, that is part of a workflow.
Type (string) --
The type of AWS Glue component represented by the node.
Name (string) --
The name of the AWS Glue component represented by the node.
UniqueId (string) --
The unique Id assigned to the node within the workflow.
TriggerDetails (dict) --
Details of the Trigger when the node represents a Trigger.
Trigger (dict) --
The information of the trigger represented by the trigger node.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
JobDetails (dict) --
Details of the Job when the node represents a Job.
JobRuns (list) --
The information for the job runs represented by the job node.
(dict) --
Contains information about a job run.
Id (string) --
The ID of this job run.
Attempt (integer) --
The number of the attempt to run this job.
PreviousRunId (string) --
The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.
TriggerName (string) --
The name of the trigger that started this job run.
JobName (string) --
The name of the job definition being used in this run.
StartedOn (datetime) --
The date and time at which this job run was started.
LastModifiedOn (datetime) --
The last time that this job run was modified.
CompletedOn (datetime) --
The date and time that this job run completed.
JobRunState (string) --
The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses .
Arguments (dict) --
The job arguments associated with this run. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
ErrorMessage (string) --
An error message associated with this job run.
PredecessorRuns (list) --
A list of predecessors to this job run.
(dict) --
A job run that was used in the predicate of a conditional trigger that triggered this job run.
JobName (string) --
The name of the job definition used by the predecessor job run.
RunId (string) --
The job-run ID of the predecessor job run.
AllocatedCapacity (integer) --
This field is deprecated. Use MaxCapacity instead.
The number of AWS Glue data processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
ExecutionTime (integer) --
The amount of time (in seconds) that the job run consumed resources.
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
MaxCapacity (float) --
The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .
Do not set Max Capacity if using WorkerType and NumberOfWorkers .
The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
WorkerType (string) --
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
NumberOfWorkers (integer) --
The number of workers of a defined workerType that are allocated when a job runs.
The maximum number of workers you can define is 299 for G.1X and 149 for G.2X.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this job run.
LogGroupName (string) --
The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/ , in which case the default encryption is NONE . If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security configuration is used to encrypt the log group.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
GlueVersion (string) --
Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.
For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
Jobs that are created without specifying a Glue version default to Glue 0.9.
CrawlerDetails (dict) --
Details of the crawler when the node represents a crawler.
Crawls (list) --
A list of crawls represented by the crawl node.
(dict) --
The details of a crawl in the workflow.
State (string) --
The state of the crawler.
StartedOn (datetime) --
The date and time on which the crawl started.
CompletedOn (datetime) --
The date and time on which the crawl completed.
ErrorMessage (string) --
The error message associated with the crawl.
LogGroup (string) --
The log group associated with the crawl.
LogStream (string) --
The log stream associated with the crawl.
Edges (list) --
A list of all the directed connections between the nodes belonging to the workflow.
(dict) --
An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.
SourceId (string) --
The unique ID of the node within the workflow where the edge starts.
DestinationId (string) --
The unique ID of the node within the workflow where the edge ends.
NextToken (string) --
A continuation token, if not all requested workflow runs have been returned.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Runs': [
{
'Name': 'string',
'WorkflowRunId': 'string',
'WorkflowRunProperties': {
'string': 'string'
},
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'Status': 'RUNNING'|'COMPLETED'|'STOPPING'|'STOPPED',
'Statistics': {
'TotalActions': 123,
'TimeoutActions': 123,
'FailedActions': 123,
'StoppedActions': 123,
'SucceededActions': 123,
'RunningActions': 123
},
'Graph': {
'Nodes': [
{
'Type': 'CRAWLER'|'JOB'|'TRIGGER',
'Name': 'string',
'UniqueId': 'string',
'TriggerDetails': {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
},
'JobDetails': {
'JobRuns': [
{
'Id': 'string',
'Attempt': 123,
'PreviousRunId': 'string',
'TriggerName': 'string',
'JobName': 'string',
'StartedOn': datetime(2015, 1, 1),
'LastModifiedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'JobRunState': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'Arguments': {
'string': 'string'
},
'ErrorMessage': 'string',
'PredecessorRuns': [
{
'JobName': 'string',
'RunId': 'string'
},
],
'AllocatedCapacity': 123,
'ExecutionTime': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'LogGroupName': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
},
]
},
'CrawlerDetails': {
'Crawls': [
{
'State': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED',
'StartedOn': datetime(2015, 1, 1),
'CompletedOn': datetime(2015, 1, 1),
'ErrorMessage': 'string',
'LogGroup': 'string',
'LogStream': 'string'
},
]
}
},
],
'Edges': [
{
'SourceId': 'string',
'DestinationId': 'string'
},
]
}
},
],
'NextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
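# Illustrative usage sketch, not part of the generated stubs: pages through get_workflow_runs
# with NextToken and collects the run metadata for every run of the named workflow. Assumes
# boto3 is installed and credentials are configured; the helper name is hypothetical.
def _example_collect_workflow_runs(workflow_name):
    import boto3
    client = boto3.client('glue')
    runs, token = [], None
    while True:
        kwargs = {'Name': workflow_name, 'IncludeGraph': False, 'MaxResults': 25}
        if token:
            kwargs['NextToken'] = token
        response = client.get_workflow_runs(**kwargs)
        runs.extend(response.get('Runs', []))
        token = response.get('NextToken')
        if not token:
            return runs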
def import_catalog_to_glue(CatalogId=None):
"""
Imports an existing Amazon Athena Data Catalog to AWS Glue.
See also: AWS API Documentation
Exceptions
:example: response = client.import_catalog_to_glue(
CatalogId='string'
)
:type CatalogId: string
:param CatalogId: The ID of the catalog to import. Currently, this should be the AWS account ID.
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def list_crawlers(MaxResults=None, NextToken=None, Tags=None):
"""
Retrieves the names of all crawler resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_crawlers(
MaxResults=123,
NextToken='string',
Tags={
'string': 'string'
}
)
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'CrawlerNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
CrawlerNames (list) --
The names of all crawlers in the account, or the crawlers with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last result available.
Exceptions
Glue.Client.exceptions.OperationTimeoutException
:return: {
'CrawlerNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
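# Illustrative usage sketch, not part of the generated stubs: follows NextToken until all
# crawler names have been collected, optionally filtering by tags. The same loop shape applies
# to the other list_* stubs below. Assumes boto3 is installed and credentials are configured;
# the helper name is hypothetical.
def _example_list_all_crawlers(tags=None):
    import boto3
    client = boto3.client('glue')
    names, token = [], None
    while True:
        kwargs = {'MaxResults': 100}
        if tags:
            kwargs['Tags'] = tags
        if token:
            kwargs['NextToken'] = token
        response = client.list_crawlers(**kwargs)
        names.extend(response.get('CrawlerNames', []))
        token = response.get('NextToken')
        if not token:
            return names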
def list_dev_endpoints(NextToken=None, MaxResults=None, Tags=None):
"""
Retrieves the names of all DevEndpoint resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_dev_endpoints(
NextToken='string',
MaxResults=123,
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'DevEndpointNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
DevEndpointNames (list) --
The names of all the DevEndpoint s in the account, or the DevEndpoint s with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last result available.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'DevEndpointNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def list_jobs(NextToken=None, MaxResults=None, Tags=None):
"""
Retrieves the names of all job resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_jobs(
NextToken='string',
MaxResults=123,
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'JobNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
JobNames (list) --
The names of all jobs in the account, or the jobs with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last result available.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def list_ml_transforms(NextToken=None, MaxResults=None, Filter=None, Sort=None, Tags=None):
"""
Retrieves a sortable, filterable list of existing AWS Glue machine learning transforms in this AWS account, or the resources with the specified tag. This operation takes the optional Tags field, which you can use as a filter of the responses so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tags are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_ml_transforms(
NextToken='string',
MaxResults=123,
Filter={
'Name': 'string',
'TransformType': 'FIND_MATCHES',
'Status': 'NOT_READY'|'READY'|'DELETING',
'GlueVersion': 'string',
'CreatedBefore': datetime(2015, 1, 1),
'CreatedAfter': datetime(2015, 1, 1),
'LastModifiedBefore': datetime(2015, 1, 1),
'LastModifiedAfter': datetime(2015, 1, 1),
'Schema': [
{
'Name': 'string',
'DataType': 'string'
},
]
},
Sort={
'Column': 'NAME'|'TRANSFORM_TYPE'|'STATUS'|'CREATED'|'LAST_MODIFIED',
'SortDirection': 'DESCENDING'|'ASCENDING'
},
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Filter: dict
:param Filter: A TransformFilterCriteria used to filter the machine learning transforms.\n\nName (string) --A unique transform name that is used to filter the machine learning transforms.\n\nTransformType (string) --The type of machine learning transform that is used to filter the machine learning transforms.\n\nStatus (string) --Filters the list of machine learning transforms by the last known status of the transforms (to indicate whether a transform can be used or not). One of 'NOT_READY', 'READY', or 'DELETING'.\n\nGlueVersion (string) --This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.\n\nCreatedBefore (datetime) --The time and date before which the transforms were created.\n\nCreatedAfter (datetime) --The time and date after which the transforms were created.\n\nLastModifiedBefore (datetime) --Filter on transforms last modified before this date.\n\nLastModifiedAfter (datetime) --Filter on transforms last modified after this date.\n\nSchema (list) --Filters on datasets with a specific schema. The Map<Column, Type> object is an array of key-value pairs representing the schema this transform accepts, where Column is the name of a column, and Type is the type of the data such as an integer or string. Has an upper bound of 100 columns.\n\n(dict) --A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.\n\nName (string) --The name of the column.\n\nDataType (string) --The type of data in the column.\n\n\n\n\n\n\n
:type Sort: dict
:param Sort: A TransformSortCriteria used to sort the machine learning transforms.\n\nColumn (string) -- [REQUIRED]The column to be used in the sorting criteria that are associated with the machine learning transform.\n\nSortDirection (string) -- [REQUIRED]The sort direction to be used in the sorting criteria that are associated with the machine learning transform.\n\n\n
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'TransformIds': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
TransformIds (list) --
The identifiers of all the machine learning transforms in the account, or the machine learning transforms with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last result available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TransformIds': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
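# Illustrative usage sketch, not part of the generated stubs: lists the IDs of READY
# FIND_MATCHES transforms, newest first, using the Filter and Sort shapes documented above.
# Assumes boto3 is installed and credentials are configured; the helper name is hypothetical.
def _example_list_ready_find_matches_transforms():
    import boto3
    client = boto3.client('glue')
    response = client.list_ml_transforms(
        Filter={'TransformType': 'FIND_MATCHES', 'Status': 'READY'},
        Sort={'Column': 'CREATED', 'SortDirection': 'DESCENDING'},
        MaxResults=50)
    return response.get('TransformIds', [])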
def list_triggers(NextToken=None, DependentJobName=None, MaxResults=None, Tags=None):
"""
Retrieves the names of all trigger resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.
See also: AWS API Documentation
Exceptions
:example: response = client.list_triggers(
NextToken='string',
DependentJobName='string',
MaxResults=123,
Tags={
'string': 'string'
}
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type DependentJobName: string
:param DependentJobName: The name of the job for which to retrieve triggers. The trigger that can start this job is returned. If there is no such trigger, all triggers are returned.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:type Tags: dict
:param Tags: Specifies to return only these tagged resources.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'TriggerNames': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
TriggerNames (list) --
The names of all triggers in the account, or the triggers with the specified tags.
(string) --
NextToken (string) --
A continuation token, if the returned list does not contain the last result available.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'TriggerNames': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
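# Illustrative sketch (not part of the generated stubs): paginating through
# list_triggers with NextToken, optionally filtering by DependentJobName.
# Assumes boto3 is installed and AWS credentials/region are configured.
def _example_list_all_triggers(dependent_job_name=None):
    import boto3
    client = boto3.client('glue')
    names, token = [], None
    while True:
        kwargs = {'MaxResults': 100}
        if dependent_job_name:
            kwargs['DependentJobName'] = dependent_job_name
        if token:
            kwargs['NextToken'] = token
        response = client.list_triggers(**kwargs)
        names.extend(response.get('TriggerNames', []))
        token = response.get('NextToken')
        if not token:
            return names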
def list_workflows(NextToken=None, MaxResults=None):
"""
Lists names of workflows created in the account.
See also: AWS API Documentation
Exceptions
:example: response = client.list_workflows(
NextToken='string',
MaxResults=123
)
:type NextToken: string
:param NextToken: A continuation token, if this is a continuation request.
:type MaxResults: integer
:param MaxResults: The maximum size of a list to return.
:rtype: dict
Returns
Response Syntax
{
'Workflows': [
'string',
],
'NextToken': 'string'
}
Response Structure
(dict) --
Workflows (list) --
List of names of workflows in the account.
(string) --
NextToken (string) --
A continuation token, if not all workflow names have been returned.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'Workflows': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def put_data_catalog_encryption_settings(CatalogId=None, DataCatalogEncryptionSettings=None):
"""
Sets the security configuration for a specified catalog. After the configuration has been set, the specified encryption is applied to every catalog write thereafter.
See also: AWS API Documentation
Exceptions
:example: response = client.put_data_catalog_encryption_settings(
CatalogId='string',
DataCatalogEncryptionSettings={
'EncryptionAtRest': {
'CatalogEncryptionMode': 'DISABLED'|'SSE-KMS',
'SseAwsKmsKeyId': 'string'
},
'ConnectionPasswordEncryption': {
'ReturnConnectionPasswordEncrypted': True|False,
'AwsKmsKeyId': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog to set the security configuration for. If none is provided, the AWS account ID is used by default.
:type DataCatalogEncryptionSettings: dict
:param DataCatalogEncryptionSettings: [REQUIRED]\nThe security configuration to set.\n\nEncryptionAtRest (dict) --Specifies the encryption-at-rest configuration for the Data Catalog.\n\nCatalogEncryptionMode (string) -- [REQUIRED]The encryption-at-rest mode for encrypting Data Catalog data.\n\nSseAwsKmsKeyId (string) --The ID of the AWS KMS key to use for encryption at rest.\n\n\n\nConnectionPasswordEncryption (dict) --When connection password protection is enabled, the Data Catalog uses a customer-provided key to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption.\n\nReturnConnectionPasswordEncrypted (boolean) -- [REQUIRED]When the ReturnConnectionPasswordEncrypted flag is set to 'true', passwords remain encrypted in the responses of GetConnection and GetConnections . This encryption takes effect independently from catalog encryption.\n\nAwsKmsKeyId (string) --An AWS KMS key that is used to encrypt the connection password.\nIf connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified AWS KMS key, to encrypt passwords before storing them in the Data Catalog.\nYou can set the decrypt permission to enable or restrict access on the password key according to your security requirements.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
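# Illustrative sketch (not part of the generated stubs): enabling SSE-KMS
# encryption at rest and connection password encryption for the Data Catalog.
# The kms_key_arn argument is a placeholder; pass your own AWS KMS key.
def _example_enable_catalog_encryption(kms_key_arn):
    import boto3
    client = boto3.client('glue')
    return client.put_data_catalog_encryption_settings(
        DataCatalogEncryptionSettings={
            'EncryptionAtRest': {
                'CatalogEncryptionMode': 'SSE-KMS',
                'SseAwsKmsKeyId': kms_key_arn
            },
            'ConnectionPasswordEncryption': {
                'ReturnConnectionPasswordEncrypted': True,
                'AwsKmsKeyId': kms_key_arn
            }
        }
    )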
def put_resource_policy(PolicyInJson=None, PolicyHashCondition=None, PolicyExistsCondition=None):
"""
Sets the Data Catalog resource policy for access control.
See also: AWS API Documentation
Exceptions
:example: response = client.put_resource_policy(
PolicyInJson='string',
PolicyHashCondition='string',
PolicyExistsCondition='MUST_EXIST'|'NOT_EXIST'|'NONE'
)
:type PolicyInJson: string
:param PolicyInJson: [REQUIRED]\nContains the policy document to set, in JSON format.\n
:type PolicyHashCondition: string
:param PolicyHashCondition: The hash value returned when the previous policy was set using PutResourcePolicy . Its purpose is to prevent concurrent modifications of a policy. Do not use this parameter if no previous policy has been set.
:type PolicyExistsCondition: string
:param PolicyExistsCondition: A value of MUST_EXIST is used to update a policy. A value of NOT_EXIST is used to create a new policy. If a value of NONE or a null value is used, the call will not depend on the existence of a policy.
:rtype: dict
Returns
Response Syntax
{
'PolicyHash': 'string'
}
Response Structure
(dict) --
PolicyHash (string) --
A hash of the policy that has just been set. This must be included in a subsequent call that overwrites or updates this policy.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
:return: {
'PolicyHash': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ConditionCheckFailureException
"""
pass
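# Illustrative sketch (not part of the generated stubs): updating an existing
# Data Catalog resource policy while passing the PolicyHash returned by the
# previous put_resource_policy call, so a concurrent modification fails fast
# with ConditionCheckFailureException instead of silently overwriting.
def _example_update_resource_policy(policy_json, previous_hash):
    import boto3
    client = boto3.client('glue')
    response = client.put_resource_policy(
        PolicyInJson=policy_json,
        PolicyHashCondition=previous_hash,
        PolicyExistsCondition='MUST_EXIST'
    )
    # Keep the new hash for the next update.
    return response['PolicyHash']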
def put_workflow_run_properties(Name=None, RunId=None, RunProperties=None):
"""
Puts the specified workflow run properties for the given workflow run. If a property already exists for the specified run, its value is overwritten; otherwise, the property is added to the existing set of properties.
See also: AWS API Documentation
Exceptions
:example: response = client.put_workflow_run_properties(
Name='string',
RunId='string',
RunProperties={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow which was run.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run for which the run properties should be updated.\n
:type RunProperties: dict
:param RunProperties: [REQUIRED]\nThe properties to put for the specified run.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.AlreadyExistsException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentModificationException
:return: {}
:returns:
(dict) --
"""
pass
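# Illustrative sketch (not part of the generated stubs): recording run-scoped
# properties on a workflow run so downstream jobs in the same workflow can
# read them back later. The property names and values are placeholders.
def _example_set_run_properties(workflow_name, run_id):
    import boto3
    client = boto3.client('glue')
    client.put_workflow_run_properties(
        Name=workflow_name,
        RunId=run_id,
        RunProperties={
            'source_prefix': 's3://example-bucket/raw/',  # placeholder value
            'processing_date': '2020-01-01'               # placeholder value
        }
    )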
def reset_job_bookmark(JobName=None, RunId=None):
"""
Resets a bookmark entry.
See also: AWS API Documentation
Exceptions
:example: response = client.reset_job_bookmark(
JobName='string',
RunId='string'
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job in question.\n
:type RunId: string
:param RunId: The unique run identifier associated with this job run.
:rtype: dict
Returns
Response Syntax
{
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
Response Structure
(dict) --
JobBookmarkEntry (dict) --
The reset bookmark entry.
JobName (string) --
The name of the job in question.
Version (integer) --
The version of the job.
Run (integer) --
The run ID number.
Attempt (integer) --
The attempt ID number.
PreviousRunId (string) --
The unique run identifier associated with the previous job run.
RunId (string) --
The run ID number.
JobBookmark (string) --
The bookmark itself.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'JobBookmarkEntry': {
'JobName': 'string',
'Version': 123,
'Run': 123,
'Attempt': 123,
'PreviousRunId': 'string',
'RunId': 'string',
'JobBookmark': 'string'
}
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
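# Illustrative sketch (not part of the generated stubs): resetting a job
# bookmark so the next run reprocesses data from the beginning, then
# inspecting the returned JobBookmarkEntry.
def _example_reset_bookmark(job_name):
    import boto3
    client = boto3.client('glue')
    response = client.reset_job_bookmark(JobName=job_name)
    entry = response['JobBookmarkEntry']
    # The entry reports the bookmark version and the run it applies to.
    return entry['Version'], entry['RunId']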
def search_tables(CatalogId=None, NextToken=None, Filters=None, SearchText=None, SortCriteria=None, MaxResults=None):
"""
Searches a set of tables based on properties in the table metadata as well as on the parent database. You can search against text or filter conditions.
You can only get tables that you have access to based on the security policies defined in Lake Formation. You need at least read-only access to a table for it to be returned. If you do not have access to all the columns in a table, those columns are not searched against when the list of tables is returned to you. If you have access to the columns but not the data in the columns, those columns and their associated metadata are still included in the search.
See also: AWS API Documentation
Exceptions
:example: response = client.search_tables(
CatalogId='string',
NextToken='string',
Filters=[
{
'Key': 'string',
'Value': 'string',
'Comparator': 'EQUALS'|'GREATER_THAN'|'LESS_THAN'|'GREATER_THAN_EQUALS'|'LESS_THAN_EQUALS'
},
],
SearchText='string',
SortCriteria=[
{
'FieldName': 'string',
'Sort': 'ASC'|'DESC'
},
],
MaxResults=123
)
:type CatalogId: string
:param CatalogId: A unique identifier, consisting of ``account_id/datalake``.
:type NextToken: string
:param NextToken: A continuation token, included if this is a continuation call.
:type Filters: list
:param Filters: A list of key-value pairs, and a comparator used to filter the search results. Returns all entities matching the predicate.\n\n(dict) --Defines a property predicate.\n\nKey (string) --The key of the property.\n\nValue (string) --The value of the property.\n\nComparator (string) --The comparator used to compare this property to others.\n\n\n\n\n
:type SearchText: string
:param SearchText: A string used for a text search.\nSpecifying a value in quotes filters based on an exact match to the value.\n
:type SortCriteria: list
:param SortCriteria: A list of criteria for sorting the results by a field name, in an ascending or descending order.\n\n(dict) --Specifies a field to sort by and a sort order.\n\nFieldName (string) --The name of the field on which to sort.\n\nSort (string) --An ascending or descending sort.\n\n\n\n\n
:type MaxResults: integer
:param MaxResults: The maximum number of tables to return in a single response.
:rtype: dict
Returns
Response Syntax
{
'NextToken': 'string',
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
]
}
Response Structure
(dict) --
NextToken (string) --
A continuation token, present if the current list segment is not the last.
TableList (list) --
A list of the requested Table objects. The SearchTables response returns only the tables that you have access to.
(dict) --
Represents a collection of related data organized in columns and rows.
Name (string) --
The table name. For Hive compatibility, this must be entirely lowercase.
DatabaseName (string) --
The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.
Description (string) --
A description of the table.
Owner (string) --
The owner of the table.
CreateTime (datetime) --
The time when the table definition was created in the Data Catalog.
UpdateTime (datetime) --
The last time that the table was updated.
LastAccessTime (datetime) --
The last time that the table was accessed. This is usually taken from HDFS, and might not be reliable.
LastAnalyzedTime (datetime) --
The last time that column statistics were computed for this table.
Retention (integer) --
The retention time for this table.
StorageDescriptor (dict) --
A storage descriptor containing information about the physical storage of this table.
Columns (list) --
A list of the Columns in the table.
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
Location (string) --
The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
InputFormat (string) --
The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.
OutputFormat (string) --
The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.
Compressed (boolean) --
True if the data in the table is compressed, or False if not.
NumberOfBuckets (integer) --
Must be specified if the table contains any dimension columns.
SerdeInfo (dict) --
The serialization/deserialization (SerDe) information.
Name (string) --
Name of the SerDe.
SerializationLibrary (string) --
Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .
Parameters (dict) --
These key-value pairs define initialization parameters for the SerDe.
(string) --
(string) --
BucketColumns (list) --
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
(string) --
SortColumns (list) --
A list specifying the sort order of each bucket in the table.
(dict) --
Specifies the sort order of a sorted column.
Column (string) --
The name of the column.
SortOrder (integer) --
Indicates that the column is sorted in ascending order (== 1) or in descending order (== 0).
Parameters (dict) --
The user-supplied properties in key-value form.
(string) --
(string) --
SkewedInfo (dict) --
The information about values that appear frequently in a column (skewed values).
SkewedColumnNames (list) --
A list of names of columns that contain skewed values.
(string) --
SkewedColumnValues (list) --
A list of values that appear so frequently as to be considered skewed.
(string) --
SkewedColumnValueLocationMaps (dict) --
A mapping of skewed values to the columns that contain them.
(string) --
(string) --
StoredAsSubDirectories (boolean) --
True if the table data is stored in subdirectories, or False if not.
PartitionKeys (list) --
A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.
When you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. For example:
"PartitionKeys": []
(dict) --
A column in a Table .
Name (string) --
The name of the Column .
Type (string) --
The data type of the Column .
Comment (string) --
A free-form text comment.
Parameters (dict) --
These key-value pairs define properties associated with the column.
(string) --
(string) --
ViewOriginalText (string) --
If the table is a view, the original text of the view; otherwise null .
ViewExpandedText (string) --
If the table is a view, the expanded text of the view; otherwise null .
TableType (string) --
The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).
Parameters (dict) --
These key-value pairs define properties associated with the table.
(string) --
(string) --
CreatedBy (string) --
The person or entity who created the table.
IsRegisteredWithLakeFormation (boolean) --
Indicates whether the table has been registered with AWS Lake Formation.
Exceptions
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
:return: {
'NextToken': 'string',
'TableList': [
{
'Name': 'string',
'DatabaseName': 'string',
'Description': 'string',
'Owner': 'string',
'CreateTime': datetime(2015, 1, 1),
'UpdateTime': datetime(2015, 1, 1),
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
},
'CreatedBy': 'string',
'IsRegisteredWithLakeFormation': True|False
},
]
}
:returns:
(string) --
(string) --
"""
pass
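# Illustrative sketch (not part of the generated stubs): free-text search for
# tables, sorted by update time and followed through NextToken until all
# matching tables are collected. The 'DatabaseName' filter key and the
# 'UpdateTime' sort field are assumed here purely for illustration.
def _example_search_tables(text):
    import boto3
    client = boto3.client('glue')
    tables, token = [], None
    while True:
        kwargs = {
            'SearchText': text,
            'Filters': [
                # Placeholder property predicate.
                {'Key': 'DatabaseName', 'Value': 'sales', 'Comparator': 'EQUALS'}
            ],
            'SortCriteria': [{'FieldName': 'UpdateTime', 'Sort': 'DESC'}],
            'MaxResults': 50
        }
        if token:
            kwargs['NextToken'] = token
        response = client.search_tables(**kwargs)
        tables.extend(response.get('TableList', []))
        token = response.get('NextToken')
        if not token:
            return tables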
def start_crawler(Name=None):
"""
Starts a crawl using the specified crawler, regardless of what is scheduled. If the crawler is already running, returns a CrawlerRunningException .
See also: AWS API Documentation
Exceptions
:example: response = client.start_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the crawler to start.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
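# Illustrative sketch (not part of the generated stubs): starting a crawler
# and tolerating the case where it is already running, which the service
# reports as CrawlerRunningException.
def _example_start_crawler_if_idle(crawler_name):
    import boto3
    client = boto3.client('glue')
    try:
        client.start_crawler(Name=crawler_name)
        return True
    except client.exceptions.CrawlerRunningException:
        # Already running; nothing to do.
        return False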
def start_crawler_schedule(CrawlerName=None):
"""
Changes the schedule state of the specified crawler to SCHEDULED , unless the crawler is already running or the schedule state is already SCHEDULED .
See also: AWS API Documentation
Exceptions
:example: response = client.start_crawler_schedule(
CrawlerName='string'
)
:type CrawlerName: string
:param CrawlerName: [REQUIRED]\nName of the crawler to schedule.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.NoScheduleException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.NoScheduleException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def start_export_labels_task_run(TransformId=None, OutputS3Path=None):
"""
Begins an asynchronous task to export all labeled data for a particular transform. This task is the only label-related API call that is not part of the typical active learning workflow. You typically use StartExportLabelsTaskRun when you want to work with all of your existing labels at the same time, such as when you want to remove or change labels that were previously submitted as truth. This API operation accepts the TransformId whose labels you want to export and an Amazon Simple Storage Service (Amazon S3) path to export the labels to. The operation returns a TaskRunId . You can check on the status of your task run by calling the GetMLTaskRun API.
See also: AWS API Documentation
Exceptions
:example: response = client.start_export_labels_task_run(
TransformId='string',
OutputS3Path='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type OutputS3Path: string
:param OutputS3Path: [REQUIRED]\nThe Amazon S3 path where you export the labels.\n
:rtype: dict
Returns
Response Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --
The unique identifier for the task run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
:return: {
'TaskRunId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
"""
pass
def start_import_labels_task_run(TransformId=None, InputS3Path=None, ReplaceAllLabels=None):
"""
Enables you to provide additional labels (examples of truth) to be used to teach the machine learning transform and improve its quality. This API operation is generally used as part of the active learning workflow that starts with the StartMLLabelingSetGenerationTaskRun call and that ultimately results in improving the quality of your machine learning transform.
After the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue machine learning will have generated a series of questions for humans to answer. (Answering these questions is often called 'labeling' in machine learning workflows.) In the case of the FindMatches transform, these questions are of the form, "What is the correct way to group these rows together into groups composed entirely of matching records?" After the labeling process is finished, users upload their answers/labels with a call to StartImportLabelsTaskRun . After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform use the new and improved labels and perform a higher-quality transformation.
By default, StartMLLabelingSetGenerationTaskRun continually learns from and combines all labels that you upload unless you set ReplaceAllLabels to true. If you set ReplaceAllLabels to true, StartImportLabelsTaskRun deletes and forgets all previously uploaded labels and learns only from the exact set that you upload. Replacing labels can be helpful if you realize that you previously uploaded incorrect labels, and you believe that they are having a negative effect on your transform quality.
You can check on the status of your task run by calling the GetMLTaskRun operation.
See also: AWS API Documentation
Exceptions
:example: response = client.start_import_labels_task_run(
TransformId='string',
InputS3Path='string',
ReplaceAllLabels=True|False
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type InputS3Path: string
:param InputS3Path: [REQUIRED]\nThe Amazon Simple Storage Service (Amazon S3) path from where you import the labels.\n
:type ReplaceAllLabels: boolean
:param ReplaceAllLabels: Indicates whether to overwrite your existing labels.
:rtype: dict
Returns
Response Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --
The unique identifier for the task run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
:return: {
'TaskRunId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.InternalServiceException
"""
pass
def start_job_run(JobName=None, JobRunId=None, Arguments=None, AllocatedCapacity=None, Timeout=None, MaxCapacity=None, SecurityConfiguration=None, NotificationProperty=None, WorkerType=None, NumberOfWorkers=None):
"""
Starts a job run using a job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.start_job_run(
JobName='string',
JobRunId='string',
Arguments={
'string': 'string'
},
AllocatedCapacity=123,
Timeout=123,
MaxCapacity=123.0,
SecurityConfiguration='string',
NotificationProperty={
'NotifyDelayAfter': 123
},
WorkerType='Standard'|'G.1X'|'G.2X',
NumberOfWorkers=123
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to use.\n
:type JobRunId: string
:param JobRunId: The ID of a previous JobRun to retry.
:type Arguments: dict
:param Arguments: The job arguments specifically for this run. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n
:type AllocatedCapacity: integer
:param AllocatedCapacity: This field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n
:type Timeout: integer
:param Timeout: The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n
:type SecurityConfiguration: string
:param SecurityConfiguration: The name of the SecurityConfiguration structure to be used with this job run.
:type NotificationProperty: dict
:param NotificationProperty: Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n
:rtype: dict
Returns
Response Syntax
{
'JobRunId': 'string'
}
Response Structure
(dict) --
JobRunId (string) --
The ID assigned to this job run.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'JobRunId': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
"""
pass
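# Illustrative sketch (not part of the generated stubs): starting a Spark ETL
# job run with run-specific arguments and G.1X workers instead of MaxCapacity.
# The argument names and S3 paths are placeholders consumed by your own script.
def _example_start_job_run(job_name):
    import boto3
    client = boto3.client('glue')
    response = client.start_job_run(
        JobName=job_name,
        Arguments={
            '--input_path': 's3://example-bucket/input/',    # placeholder
            '--output_path': 's3://example-bucket/output/'   # placeholder
        },
        WorkerType='G.1X',
        NumberOfWorkers=10,
        Timeout=120
    )
    return response['JobRunId']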
def start_ml_evaluation_task_run(TransformId=None):
"""
Starts a task to estimate the quality of the transform.
When you provide label sets as examples of truth, AWS Glue machine learning uses some of those examples to learn from them. The rest of the labels are used as a test to estimate quality.
Returns a unique identifier for the run. You can call GetMLTaskRun to get more information about the stats of the EvaluationTaskRun .
See also: AWS API Documentation
Exceptions
:example: response = client.start_ml_evaluation_task_run(
TransformId='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:rtype: dict
Returns
Response Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --The unique identifier associated with this run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.ConcurrentRunsExceededException
Glue.Client.exceptions.MLTransformNotReadyException
:return: {
'TaskRunId': 'string'
}
"""
pass
def start_ml_labeling_set_generation_task_run(TransformId=None, OutputS3Path=None):
"""
Starts the active learning workflow for your machine learning transform to improve the transform\'s quality by generating label sets and adding labels.
When the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue will have generated a "labeling set" or a set of questions for humans to answer.
In the case of the FindMatches transform, these questions are of the form, "What is the correct way to group these rows together into groups composed entirely of matching records?"
After the labeling process is finished, you can upload your labels with a call to StartImportLabelsTaskRun . After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform will use the new and improved labels and perform a higher-quality transformation.
See also: AWS API Documentation
Exceptions
:example: response = client.start_ml_labeling_set_generation_task_run(
TransformId='string',
OutputS3Path='string'
)
:type TransformId: string
:param TransformId: [REQUIRED]\nThe unique identifier of the machine learning transform.\n
:type OutputS3Path: string
:param OutputS3Path: [REQUIRED]\nThe Amazon Simple Storage Service (Amazon S3) path where you generate the labeling set.\n
:rtype: dict
Returns
Response Syntax
{
'TaskRunId': 'string'
}
Response Structure
(dict) --
TaskRunId (string) --
The unique run identifier that is associated with this task run.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'TaskRunId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.ConcurrentRunsExceededException
"""
pass
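# Illustrative sketch (not part of the generated stubs): one round of the
# active learning workflow described above -- generate a labeling set, have
# humans answer it offline, then import the completed labels. The S3 paths
# are placeholders.
def _example_active_learning_round(transform_id):
    import boto3
    client = boto3.client('glue')
    generation = client.start_ml_labeling_set_generation_task_run(
        TransformId=transform_id,
        OutputS3Path='s3://example-bucket/labeling-sets/'
    )
    # ... humans label the generated set and the answers are uploaded to S3 ...
    import_run = client.start_import_labels_task_run(
        TransformId=transform_id,
        InputS3Path='s3://example-bucket/labels/answers.csv',
        ReplaceAllLabels=False
    )
    return generation['TaskRunId'], import_run['TaskRunId']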
def start_trigger(Name=None):
"""
Starts an existing trigger. See Triggering Jobs for information about how different types of trigger are started.
See also: AWS API Documentation
Exceptions
:example: response = client.start_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to start.\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --The name of the trigger that was started.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'Name': 'string'
}
"""
pass
def start_workflow_run(Name=None):
"""
Starts a new run of the specified workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.start_workflow_run(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the workflow to start.\n
:rtype: dict
Returns
Response Syntax
{
'RunId': 'string'
}
Response Structure
(dict) --
RunId (string) --An Id for the new run.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.ConcurrentRunsExceededException
:return: {
'RunId': 'string'
}
"""
pass
def stop_crawler(Name=None):
"""
If the specified crawler is running, stops the crawl.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_crawler(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the crawler to stop.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerNotRunningException
Glue.Client.exceptions.CrawlerStoppingException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerNotRunningException
Glue.Client.exceptions.CrawlerStoppingException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def stop_crawler_schedule(CrawlerName=None):
"""
Sets the schedule state of the specified crawler to NOT_SCHEDULED , but does not stop the crawler if it is already running.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_crawler_schedule(
CrawlerName='string'
)
:type CrawlerName: string
:param CrawlerName: [REQUIRED]\nName of the crawler whose schedule state to set.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerNotRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.SchedulerNotRunningException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
"""
pass
def stop_trigger(Name=None):
"""
Stops a specified trigger.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_trigger(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to stop.\n
:rtype: dict
Returns
Response Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --The name of the trigger that was stopped.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
"""
pass
def stop_workflow_run(Name=None, RunId=None):
"""
Stops the execution of the specified workflow run.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_workflow_run(
Name='string',
RunId='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the workflow to stop.\n
:type RunId: string
:param RunId: [REQUIRED]\nThe ID of the workflow run to stop.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.IllegalWorkflowStateException
:return: {}
:returns:
(dict) --
"""
pass
def tag_resource(ResourceArn=None, TagsToAdd=None):
"""
Adds tags to a resource. A tag is a label you can assign to an AWS resource. In AWS Glue, you can tag only certain resources. For information about what resources you can tag, see AWS Tags in AWS Glue .
See also: AWS API Documentation
Exceptions
:example: response = client.tag_resource(
ResourceArn='string',
TagsToAdd={
'string': 'string'
}
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe ARN of the AWS Glue resource to which to add the tags. For more information about AWS Glue resource ARNs, see the AWS Glue ARN string pattern .\n
:type TagsToAdd: dict
:param TagsToAdd: [REQUIRED]\nTags to add to this resource.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {}
:returns:
(dict) --
"""
pass
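# Illustrative sketch (not part of the generated stubs): adding tags to a Glue
# resource by ARN. The tag keys/values are placeholders; see the AWS Glue ARN
# string pattern for how to build the resource ARN.
def _example_tag_glue_resource(resource_arn):
    import boto3
    client = boto3.client('glue')
    client.tag_resource(
        ResourceArn=resource_arn,
        TagsToAdd={'team': 'data-eng', 'env': 'dev'}  # placeholder tags
    )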
def untag_resource(ResourceArn=None, TagsToRemove=None):
"""
Removes tags from a resource.
See also: AWS API Documentation
Exceptions
:example: response = client.untag_resource(
ResourceArn='string',
TagsToRemove=[
'string',
]
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource from which to remove the tags.\n
:type TagsToRemove: list
:param TagsToRemove: [REQUIRED]\nTags to remove from this resource.\n\n(string) --\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.EntityNotFoundException
:return: {}
:returns:
(dict) --
"""
pass
def update_classifier(GrokClassifier=None, XMLClassifier=None, JsonClassifier=None, CsvClassifier=None):
"""
Modifies an existing classifier (a GrokClassifier , an XMLClassifier , a JsonClassifier , or a CsvClassifier , depending on which field is present).
See also: AWS API Documentation
Exceptions
:example: response = client.update_classifier(
GrokClassifier={
'Name': 'string',
'Classification': 'string',
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
XMLClassifier={
'Name': 'string',
'Classification': 'string',
'RowTag': 'string'
},
JsonClassifier={
'Name': 'string',
'JsonPath': 'string'
},
CsvClassifier={
'Name': 'string',
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
)
:type GrokClassifier: dict
:param GrokClassifier: A GrokClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the GrokClassifier .\n\nClassification (string) --An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, Amazon CloudWatch Logs, and so on.\n\nGrokPattern (string) --The grok pattern used by this classifier.\n\nCustomPatterns (string) --Optional custom grok patterns used by this classifier.\n\n\n
:type XMLClassifier: dict
:param XMLClassifier: An XMLClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nClassification (string) --An identifier of the data format that the classifier matches.\n\nRowTag (string) --The XML tag designating the element that contains each record in an XML document being parsed. This cannot identify a self-closing element (closed by /> ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, <row item_a='A' item_b='B'></row> is okay, but <row item_a='A' item_b='B' /> is not).\n\n\n
:type JsonClassifier: dict
:param JsonClassifier: A JsonClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nJsonPath (string) --A JsonPath string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath , as described in Writing JsonPath Custom Classifiers .\n\n\n
:type CsvClassifier: dict
:param CsvClassifier: A CsvClassifier object with updated fields.\n\nName (string) -- [REQUIRED]The name of the classifier.\n\nDelimiter (string) --A custom symbol to denote what separates each column entry in the row.\n\nQuoteSymbol (string) --A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.\n\nContainsHeader (string) --Indicates whether the CSV file contains a header.\n\nHeader (list) --A list of strings representing column names.\n\n(string) --\n\n\nDisableValueTrimming (boolean) --Specifies not to trim values before identifying the type of column values. The default value is true.\n\nAllowSingleColumn (boolean) --Enables the processing of files that contain only one column.\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.VersionMismatchException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def update_connection(CatalogId=None, Name=None, ConnectionInput=None):
"""
Updates a connection definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_connection(
CatalogId='string',
Name='string',
ConnectionInput={
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP'|'MONGODB'|'KAFKA',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
}
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the connection resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the connection definition to update.\n
:type ConnectionInput: dict
:param ConnectionInput: [REQUIRED]\nA ConnectionInput object that redefines the connection in question.\n\nName (string) -- [REQUIRED]The name of the connection.\n\nDescription (string) --The description of the connection.\n\nConnectionType (string) -- [REQUIRED]The type of the connection. Currently, these types are supported:\n\nJDBC - Designates a connection to a database through Java Database Connectivity (JDBC).\nKAFKA - Designates a connection to an Apache Kafka streaming platform.\nMONGODB - Designates a connection to a MongoDB document database.\n\nSFTP is not supported.\n\nMatchCriteria (list) --A list of criteria that can be used in selecting this connection.\n\n(string) --\n\n\nConnectionProperties (dict) -- [REQUIRED]These key-value pairs define parameters for the connection.\n\n(string) --\n(string) --\n\n\n\n\nPhysicalConnectionRequirements (dict) --A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup , that are needed to successfully make this connection.\n\nSubnetId (string) --The subnet ID used by the connection.\n\nSecurityGroupIdList (list) --The security group ID list used by the connection.\n\n(string) --\n\n\nAvailabilityZone (string) --The connection\'s Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
def update_crawler(Name=None, Role=None, DatabaseName=None, Description=None, Targets=None, Schedule=None, Classifiers=None, TablePrefix=None, SchemaChangePolicy=None, Configuration=None, CrawlerSecurityConfiguration=None):
"""
Updates a crawler. If a crawler is running, you must stop it using StopCrawler before updating it.
See also: AWS API Documentation
Exceptions
:example: response = client.update_crawler(
Name='string',
Role='string',
DatabaseName='string',
Description='string',
Targets={
'S3Targets': [
{
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'JdbcTargets': [
{
'ConnectionName': 'string',
'Path': 'string',
'Exclusions': [
'string',
]
},
],
'DynamoDBTargets': [
{
'Path': 'string'
},
],
'CatalogTargets': [
{
'DatabaseName': 'string',
'Tables': [
'string',
]
},
]
},
Schedule='string',
Classifiers=[
'string',
],
TablePrefix='string',
SchemaChangePolicy={
'UpdateBehavior': 'LOG'|'UPDATE_IN_DATABASE',
'DeleteBehavior': 'LOG'|'DELETE_FROM_DATABASE'|'DEPRECATE_IN_DATABASE'
},
Configuration='string',
CrawlerSecurityConfiguration='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the new crawler.\n
:type Role: string
:param Role: The IAM role or Amazon Resource Name (ARN) of an IAM role that is used by the new crawler to access customer resources.
:type DatabaseName: string
:param DatabaseName: The AWS Glue database where results are stored, such as: arn:aws:daylight:us-east-1::database/sometable/* .
:type Description: string
:param Description: A description of the new crawler.
:type Targets: dict
:param Targets: A list of targets to crawl.\n\nS3Targets (list) --Specifies Amazon Simple Storage Service (Amazon S3) targets.\n\n(dict) --Specifies a data store in Amazon Simple Storage Service (Amazon S3).\n\nPath (string) --The path to the Amazon S3 target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nJdbcTargets (list) --Specifies JDBC targets.\n\n(dict) --Specifies a JDBC data store to crawl.\n\nConnectionName (string) --The name of the connection to use to connect to the JDBC target.\n\nPath (string) --The path of the JDBC target.\n\nExclusions (list) --A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler .\n\n(string) --\n\n\n\n\n\n\nDynamoDBTargets (list) --Specifies Amazon DynamoDB targets.\n\n(dict) --Specifies an Amazon DynamoDB table to crawl.\n\nPath (string) --The name of the DynamoDB table to crawl.\n\n\n\n\n\nCatalogTargets (list) --Specifies AWS Glue Data Catalog targets.\n\n(dict) --Specifies an AWS Glue Data Catalog target.\n\nDatabaseName (string) -- [REQUIRED]The name of the database to be synchronized.\n\nTables (list) -- [REQUIRED]A list of the tables to be synchronized.\n\n(string) --\n\n\n\n\n\n\n\n
:type Schedule: string
:param Schedule: A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
:type Classifiers: list
:param Classifiers: A list of custom classifiers that the user has registered. By default, all built-in classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.\n\n(string) --\n\n
:type TablePrefix: string
:param TablePrefix: The table prefix used for catalog tables that are created.
:type SchemaChangePolicy: dict
:param SchemaChangePolicy: The policy for the crawler\'s update and deletion behavior.\n\nUpdateBehavior (string) --The update behavior when the crawler finds a changed schema.\n\nDeleteBehavior (string) --The deletion behavior when the crawler finds a deleted object.\n\n\n
:type Configuration: string
:param Configuration: The crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler\'s behavior. For more information, see Configuring a Crawler .
:type CrawlerSecurityConfiguration: string
:param CrawlerSecurityConfiguration: The name of the SecurityConfiguration structure to be used by this crawler.
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.VersionMismatchException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.CrawlerRunningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
def update_crawler_schedule(CrawlerName=None, Schedule=None):
"""
Updates the schedule of a crawler using a cron expression.
See also: AWS API Documentation
Exceptions
:example: response = client.update_crawler_schedule(
CrawlerName='string',
Schedule='string'
)
:type CrawlerName: string
:param CrawlerName: [REQUIRED]\nThe name of the crawler whose schedule to update.\n
:type Schedule: string
:param Schedule: The updated cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, specify cron(15 12 * * ? *) .
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.VersionMismatchException
Glue.Client.exceptions.SchedulerTransitioningException
Glue.Client.exceptions.OperationTimeoutException
:return: {}
:returns:
(dict) --
"""
pass
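# Illustrative sketch (not part of the generated stubs): switching a crawler
# to a daily 12:15 UTC schedule using the cron syntax shown above, then
# ensuring the schedule is in the SCHEDULED state.
def _example_schedule_crawler_daily(crawler_name):
    import boto3
    client = boto3.client('glue')
    client.update_crawler_schedule(
        CrawlerName=crawler_name,
        Schedule='cron(15 12 * * ? *)'
    )
    try:
        client.start_crawler_schedule(CrawlerName=crawler_name)
    except client.exceptions.SchedulerRunningException:
        # Schedule state is already SCHEDULED.
        pass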
def update_database(CatalogId=None, Name=None, DatabaseInput=None):
"""
Updates an existing database definition in a Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_database(
CatalogId='string',
Name='string',
DatabaseInput={
'Name': 'string',
'Description': 'string',
'LocationUri': 'string',
'Parameters': {
'string': 'string'
},
'CreateTableDefaultPermissions': [
{
'Principal': {
'DataLakePrincipalIdentifier': 'string'
},
'Permissions': [
'ALL'|'SELECT'|'ALTER'|'DROP'|'DELETE'|'INSERT'|'CREATE_DATABASE'|'CREATE_TABLE'|'DATA_LOCATION_ACCESS',
]
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog in which the metadata database resides. If none is provided, the AWS account ID is used by default.
:type Name: string
:param Name: [REQUIRED]\nThe name of the database to update in the catalog. For Hive compatibility, this is folded to lowercase.\n
:type DatabaseInput: dict
:param DatabaseInput: [REQUIRED]\nA DatabaseInput object specifying the new definition of the metadata database in the catalog.\n\nName (string) -- [REQUIRED]The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the database.\n\nLocationUri (string) --The location of the database (for example, an HDFS path).\n\nParameters (dict) --These key-value pairs define parameters and properties of the database.\nThese key-value pairs define parameters and properties of the database.\n\n(string) --\n(string) --\n\n\n\n\nCreateTableDefaultPermissions (list) --Creates a set of default permissions on the table for principals.\n\n(dict) --Permissions granted to a principal.\n\nPrincipal (dict) --The principal who is granted permissions.\n\nDataLakePrincipalIdentifier (string) --An identifier for the AWS Lake Formation principal.\n\n\n\nPermissions (list) --The permissions that are granted to the principal.\n\n(string) --\n\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
def update_dev_endpoint(EndpointName=None, PublicKey=None, AddPublicKeys=None, DeletePublicKeys=None, CustomLibraries=None, UpdateEtlLibraries=None, DeleteArguments=None, AddArguments=None):
"""
Updates a specified development endpoint.
See also: AWS API Documentation
Exceptions
:example: response = client.update_dev_endpoint(
EndpointName='string',
PublicKey='string',
AddPublicKeys=[
'string',
],
DeletePublicKeys=[
'string',
],
CustomLibraries={
'ExtraPythonLibsS3Path': 'string',
'ExtraJarsS3Path': 'string'
},
UpdateEtlLibraries=True|False,
DeleteArguments=[
'string',
],
AddArguments={
'string': 'string'
}
)
:type EndpointName: string
:param EndpointName: [REQUIRED]\nThe name of the DevEndpoint to be updated.\n
:type PublicKey: string
:param PublicKey: The public key for the DevEndpoint to use.
:type AddPublicKeys: list
:param AddPublicKeys: The list of public keys for the DevEndpoint to use.\n\n(string) --\n\n
:type DeletePublicKeys: list
:param DeletePublicKeys: The list of public keys to be deleted from the DevEndpoint .\n\n(string) --\n\n
:type CustomLibraries: dict
:param CustomLibraries: Custom Python or Java libraries to be loaded in the DevEndpoint .\n\nExtraPythonLibsS3Path (string) --The paths to one or more Python libraries in an Amazon Simple Storage Service (Amazon S3) bucket that should be loaded in your DevEndpoint . Multiple values must be complete paths separated by a comma.\n\nNote\nYou can only use pure Python libraries with a DevEndpoint . Libraries that rely on C extensions, such as the pandas Python data analysis library, are not currently supported.\n\n\nExtraJarsS3Path (string) --The path to one or more Java .jar files in an S3 bucket that should be loaded in your DevEndpoint .\n\nNote\nYou can only use pure Java/Scala libraries with a DevEndpoint .\n\n\n\n
:type UpdateEtlLibraries: boolean
:param UpdateEtlLibraries: True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False otherwise.
:type DeleteArguments: list
:param DeleteArguments: The list of argument keys to be deleted from the map of arguments used to configure the DevEndpoint .\n\n(string) --\n\n
:type AddArguments: dict
:param AddArguments: The map of arguments to add to the map of arguments used to configure the DevEndpoint .\nValid arguments are:\n\n'--enable-glue-datacatalog': ''\n'GLUE_PYTHON_VERSION': '3'\n'GLUE_PYTHON_VERSION': '2'\n\nYou can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.ValidationException
:return: {}
:returns:
(dict) --
"""
pass
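# Illustrative sketch (not part of the generated stubs): pointing a development
# endpoint at updated pure-Python libraries in S3 and asking Glue to reload
# them. The S3 path is a placeholder.
def _example_refresh_dev_endpoint_libs(endpoint_name):
    import boto3
    client = boto3.client('glue')
    client.update_dev_endpoint(
        EndpointName=endpoint_name,
        CustomLibraries={
            'ExtraPythonLibsS3Path': 's3://example-bucket/libs/mylib.zip'
        },
        UpdateEtlLibraries=True
    )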
def update_job(JobName=None, JobUpdate=None):
"""
Updates an existing job definition.
See also: AWS API Documentation
Exceptions
:example: response = client.update_job(
JobName='string',
JobUpdate={
'Description': 'string',
'LogUri': 'string',
'Role': 'string',
'ExecutionProperty': {
'MaxConcurrentRuns': 123
},
'Command': {
'Name': 'string',
'ScriptLocation': 'string',
'PythonVersion': 'string'
},
'DefaultArguments': {
'string': 'string'
},
'NonOverridableArguments': {
'string': 'string'
},
'Connections': {
'Connections': [
'string',
]
},
'MaxRetries': 123,
'AllocatedCapacity': 123,
'Timeout': 123,
'MaxCapacity': 123.0,
'WorkerType': 'Standard'|'G.1X'|'G.2X',
'NumberOfWorkers': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'GlueVersion': 'string'
}
)
:type JobName: string
:param JobName: [REQUIRED]\nThe name of the job definition to update.\n
:type JobUpdate: dict
:param JobUpdate: [REQUIRED]\nSpecifies the values with which to update the job definition.\n\nDescription (string) --Description of the job being defined.\n\nLogUri (string) --This field is reserved for future use.\n\nRole (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required).\n\nExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.\n\nMaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.\n\n\n\nCommand (dict) --The JobCommand that executes this job (required).\n\nName (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell .\n\nScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job.\n\nPythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3.\n\n\n\nDefaultArguments (dict) --The default arguments for this job.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nNonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs.\n\n(string) --\n(string) --\n\n\n\n\nConnections (dict) --The connections used for this job.\n\nConnections (list) --A list of connections used by the job.\n\n(string) --\n\n\n\n\nMaxRetries (integer) --The maximum number of times to retry this job if it fails.\n\nAllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead.\nThe number of AWS Glue data processing units (DPUs) to allocate to this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\n\nTimeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).\n\nMaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nDo not set Max Capacity if using WorkerType and NumberOfWorkers .\nThe value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\nWhen you specify a Python shell job (JobCommand.Name ='pythonshell'), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\nWhen you specify an Apache Spark ETL job (JobCommand.Name ='glueetl'), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.\n\n\nWorkerType (string) --The type of predefined worker that is allocated when a job runs. 
Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\nFor the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.\n\n\nNumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs.\nThe maximum number of workers you can define are 299 for G.1X , and 149 for G.2X .\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job.\n\nNotificationProperty (dict) --Specifies the configuration properties of a job notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nGlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'JobName': 'string'
}
Response Structure
(dict) --
JobName (string) --
Returns the name of the updated job definition.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'JobName': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
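# Hedged usage sketch (not part of the generated stubs): updating an existing
# Spark ETL job's worker settings. Role and Command are included because the
# docstring marks them as required inside JobUpdate; all names and values are
# hypothetical.
def _example_update_job():
    import boto3  # assumed installed and configured
    client = boto3.client('glue')
    return client.update_job(
        JobName='my-etl-job',  # hypothetical job name
        JobUpdate={
            'Role': 'MyGlueServiceRole',  # hypothetical IAM role
            'Command': {
                'Name': 'glueetl',  # Apache Spark ETL job
                'ScriptLocation': 's3://my-bucket/scripts/etl.py',
                'PythonVersion': '3',
            },
            'WorkerType': 'G.1X',
            'NumberOfWorkers': 10,
            'Timeout': 120,  # minutes
            'GlueVersion': '2.0',
        },
    )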
def update_ml_transform(TransformId=None, Name=None, Description=None, Parameters=None, Role=None, GlueVersion=None, MaxCapacity=None, WorkerType=None, NumberOfWorkers=None, Timeout=None, MaxRetries=None):
"""
Updates an existing machine learning transform. Call this operation to tune the algorithm parameters to achieve better results.
After calling this operation, you can call the StartMLEvaluationTaskRun operation to assess how well your new parameters achieved your goals (such as improving the quality of your machine learning transform, or making it more cost-effective).
See also: AWS API Documentation
Exceptions
:example: response = client.update_ml_transform(
TransformId='string',
Name='string',
Description='string',
Parameters={
'TransformType': 'FIND_MATCHES',
'FindMatchesParameters': {
'PrimaryKeyColumnName': 'string',
'PrecisionRecallTradeoff': 123.0,
'AccuracyCostTradeoff': 123.0,
'EnforceProvidedLabels': True|False
}
},
Role='string',
GlueVersion='string',
MaxCapacity=123.0,
WorkerType='Standard'|'G.1X'|'G.2X',
NumberOfWorkers=123,
Timeout=123,
MaxRetries=123
)
:type TransformId: string
:param TransformId: [REQUIRED]\nA unique identifier that was generated when the transform was created.\n
:type Name: string
:param Name: The unique name that you gave the transform when you created it.
:type Description: string
:param Description: A description of the transform. The default is an empty string.
:type Parameters: dict
:param Parameters: The configuration parameters that are specific to the transform type (algorithm) used. Conditionally dependent on the transform type.\n\nTransformType (string) -- [REQUIRED]The type of machine learning transform.\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms .\n\nFindMatchesParameters (dict) --The parameters for the find matches algorithm.\n\nPrimaryKeyColumnName (string) --The name of a column that uniquely identifies rows in the source table. Used to help identify matching records.\n\nPrecisionRecallTradeoff (float) --The value selected when tuning your transform for a balance between precision and recall. A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0 means very low recall, and choosing values close to 0.0 results in very low precision.\nThe precision metric indicates how often your model is correct when it predicts a match.\nThe recall metric indicates that for an actual match, how often your model predicts the match.\n\nAccuracyCostTradeoff (float) --The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.\nAccuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.\nCost measures how many compute resources, and thus money, are consumed to run the transform.\n\nEnforceProvidedLabels (boolean) --The value to switch on or off to force the output to match the provided labels from users. If the value is True , the find matches transform forces the output to match the provided labels. The results override the normal conflation results. If the value is False , the find matches transform does not ensure all the labels provided are respected, and the results rely on the trained model.\nNote that setting this value to true may increase the conflation execution time.\n\n\n\n\n
:type Role: string
:param Role: The name or Amazon Resource Name (ARN) of the IAM role with the required permissions.
:type GlueVersion: string
:param GlueVersion: This value determines which version of AWS Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see AWS Glue Versions in the developer guide.
:type MaxCapacity: float
:param MaxCapacity: The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page .\nWhen the WorkerType field is set to a value other than Standard , the MaxCapacity field is set automatically and becomes read-only.\n
:type WorkerType: string
:param WorkerType: The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.\n\nFor the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.\nFor the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.\nFor the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.\n\n
:type NumberOfWorkers: integer
:param NumberOfWorkers: The number of workers of a defined workerType that are allocated when this task runs.
:type Timeout: integer
:param Timeout: The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
:type MaxRetries: integer
:param MaxRetries: The maximum number of times to retry a task for this transform after a task run fails.
:rtype: dict
ReturnsResponse Syntax
{
'TransformId': 'string'
}
Response Structure
(dict) --
TransformId (string) --
The unique identifier for the transform that was updated.
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
:return: {
'TransformId': 'string'
}
:returns:
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.AccessDeniedException
"""
pass
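# Hedged usage sketch (not part of the generated stubs): re-tuning a
# FindMatches transform toward higher precision. The transform id and column
# name are hypothetical placeholders.
def _example_update_ml_transform():
    import boto3  # assumed installed and configured
    client = boto3.client('glue')
    return client.update_ml_transform(
        TransformId='0123456789abcdef0123456789abcdef',  # hypothetical id
        Parameters={
            'TransformType': 'FIND_MATCHES',
            'FindMatchesParameters': {
                'PrimaryKeyColumnName': 'record_id',  # hypothetical column
                'PrecisionRecallTradeoff': 0.9,  # bias toward precision
                'AccuracyCostTradeoff': 0.5,  # balance accuracy and cost
            },
        },
    )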
def update_partition(CatalogId=None, DatabaseName=None, TableName=None, PartitionValueList=None, PartitionInput=None):
"""
Updates a partition.
See also: AWS API Documentation
Exceptions
:example: response = client.update_partition(
CatalogId='string',
DatabaseName='string',
TableName='string',
PartitionValueList=[
'string',
],
PartitionInput={
'Values': [
'string',
],
'LastAccessTime': datetime(2015, 1, 1),
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'Parameters': {
'string': 'string'
},
'LastAnalyzedTime': datetime(2015, 1, 1)
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the partition to be updated resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table in question resides.\n
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table in which the partition to be updated is located.\n
:type PartitionValueList: list
:param PartitionValueList: [REQUIRED]\nA list of the values defining the partition.\n\n(string) --\n\n
:type PartitionInput: dict
:param PartitionInput: [REQUIRED]\nThe new partition object to update the partition to.\n\nValues (list) --The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise AWS Glue will add the values to the wrong keys.\n\n(string) --\n\n\nLastAccessTime (datetime) --The last time at which the partition was accessed.\n\nStorageDescriptor (dict) --Provides information about the physical location where the partition is stored.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nParameters (dict) --These key-value pairs define partition parameters.\n\n(string) --\n(string) --\n\n\n\n\nLastAnalyzedTime (datetime) --The last time at which column statistics were computed for this partition.\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
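# Hedged usage sketch (not part of the generated stubs): a read-modify-write
# pattern, fetching the current partition with get_partition and writing back
# an updated PartitionInput. Database, table, and partition values are
# hypothetical.
def _example_update_partition():
    import boto3  # assumed installed and configured
    client = boto3.client('glue')
    current = client.get_partition(
        DatabaseName='analytics',  # hypothetical database
        TableName='events',  # hypothetical table
        PartitionValues=['2020', '01', '15'],  # hypothetical partition values
    )['Partition']
    partition_input = {
        'Values': current['Values'],
        'StorageDescriptor': current['StorageDescriptor'],
        # Tag the partition with an extra, purely illustrative parameter.
        'Parameters': dict(current.get('Parameters', {}), reprocessed='true'),
    }
    return client.update_partition(
        DatabaseName='analytics',
        TableName='events',
        PartitionValueList=current['Values'],
        PartitionInput=partition_input,
    )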
def update_table(CatalogId=None, DatabaseName=None, TableInput=None, SkipArchive=None):
"""
Updates a metadata table in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_table(
CatalogId='string',
DatabaseName='string',
TableInput={
'Name': 'string',
'Description': 'string',
'Owner': 'string',
'LastAccessTime': datetime(2015, 1, 1),
'LastAnalyzedTime': datetime(2015, 1, 1),
'Retention': 123,
'StorageDescriptor': {
'Columns': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'Location': 'string',
'InputFormat': 'string',
'OutputFormat': 'string',
'Compressed': True|False,
'NumberOfBuckets': 123,
'SerdeInfo': {
'Name': 'string',
'SerializationLibrary': 'string',
'Parameters': {
'string': 'string'
}
},
'BucketColumns': [
'string',
],
'SortColumns': [
{
'Column': 'string',
'SortOrder': 123
},
],
'Parameters': {
'string': 'string'
},
'SkewedInfo': {
'SkewedColumnNames': [
'string',
],
'SkewedColumnValues': [
'string',
],
'SkewedColumnValueLocationMaps': {
'string': 'string'
}
},
'StoredAsSubDirectories': True|False
},
'PartitionKeys': [
{
'Name': 'string',
'Type': 'string',
'Comment': 'string',
'Parameters': {
'string': 'string'
}
},
],
'ViewOriginalText': 'string',
'ViewExpandedText': 'string',
'TableType': 'string',
'Parameters': {
'string': 'string'
}
},
SkipArchive=True|False
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the table resides. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.\n
:type TableInput: dict
:param TableInput: [REQUIRED]\nAn updated TableInput object to define the metadata table in the catalog.\n\nName (string) -- [REQUIRED]The table name. For Hive compatibility, this is folded to lowercase when it is stored.\n\nDescription (string) --A description of the table.\n\nOwner (string) --The table owner.\n\nLastAccessTime (datetime) --The last time that the table was accessed.\n\nLastAnalyzedTime (datetime) --The last time that column statistics were computed for this table.\n\nRetention (integer) --The retention time for this table.\n\nStorageDescriptor (dict) --A storage descriptor containing information about the physical storage of this table.\n\nColumns (list) --A list of the Columns in the table.\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nLocation (string) --The physical location of the table. By default, this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.\n\nInputFormat (string) --The input format: SequenceFileInputFormat (binary), or TextInputFormat , or a custom format.\n\nOutputFormat (string) --The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat , or a custom format.\n\nCompressed (boolean) --\nTrue if the data in the table is compressed, or False if not.\n\nNumberOfBuckets (integer) --Must be specified if the table contains any dimension columns.\n\nSerdeInfo (dict) --The serialization/deserialization (SerDe) information.\n\nName (string) --Name of the SerDe.\n\nSerializationLibrary (string) --Usually the class that implements the SerDe. An example is org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe .\n\nParameters (dict) --These key-value pairs define initialization parameters for the SerDe.\n\n(string) --\n(string) --\n\n\n\n\n\n\nBucketColumns (list) --A list of reducer grouping columns, clustering columns, and bucketing columns in the table.\n\n(string) --\n\n\nSortColumns (list) --A list specifying the sort order of each bucket in the table.\n\n(dict) --Specifies the sort order of a sorted column.\n\nColumn (string) -- [REQUIRED]The name of the column.\n\nSortOrder (integer) -- [REQUIRED]Indicates that the column is sorted in ascending order (== 1 ), or in descending order (==0 ).\n\n\n\n\n\nParameters (dict) --The user-supplied properties in key-value form.\n\n(string) --\n(string) --\n\n\n\n\nSkewedInfo (dict) --The information about values that appear frequently in a column (skewed values).\n\nSkewedColumnNames (list) --A list of names of columns that contain skewed values.\n\n(string) --\n\n\nSkewedColumnValues (list) --A list of values that appear so frequently as to be considered skewed.\n\n(string) --\n\n\nSkewedColumnValueLocationMaps (dict) --A mapping of skewed values to the columns that contain them.\n\n(string) --\n(string) --\n\n\n\n\n\n\nStoredAsSubDirectories (boolean) --\nTrue if the table data is stored in subdirectories, or False if not.\n\n\n\nPartitionKeys (list) --A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.\nWhen you create a table used by Amazon Athena, and you do not specify any partitionKeys , you must at least set the value of partitionKeys to an empty list. 
For example:\n\n'PartitionKeys': []\n\n(dict) --A column in a Table .\n\nName (string) -- [REQUIRED]The name of the Column .\n\nType (string) --The data type of the Column .\n\nComment (string) --A free-form text comment.\n\nParameters (dict) --These key-value pairs define properties associated with the column.\n\n(string) --\n(string) --\n\n\n\n\n\n\n\n\nViewOriginalText (string) --If the table is a view, the original text of the view; otherwise null .\n\nViewExpandedText (string) --If the table is a view, the expanded text of the view; otherwise null .\n\nTableType (string) --The type of this table (EXTERNAL_TABLE , VIRTUAL_VIEW , etc.).\n\nParameters (dict) --These key-value pairs define properties associated with the table.\n\n(string) --\n(string) --\n\n\n\n\n\n
:type SkipArchive: boolean
:param SkipArchive: By default, UpdateTable always creates an archived version of the table before updating it. However, if skipArchive is set to true, UpdateTable does not create the archived version.
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
Glue.Client.exceptions.ResourceNumberLimitExceededException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
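# Hedged usage sketch (not part of the generated stubs): updating a catalog
# table by copying the writable fields of the current definition into a fresh
# TableInput. Read-only fields returned by get_table (timestamps, creator,
# database name) must not be passed back. Names are hypothetical.
def _example_update_table():
    import boto3  # assumed installed and configured
    client = boto3.client('glue')
    table = client.get_table(DatabaseName='analytics', Name='events')['Table']
    table_input = {
        'Name': table['Name'],
        'StorageDescriptor': table['StorageDescriptor'],
        'PartitionKeys': table.get('PartitionKeys', []),
        'TableType': table.get('TableType', 'EXTERNAL_TABLE'),
        # Merge in an illustrative table property.
        'Parameters': dict(table.get('Parameters', {}), classification='parquet'),
    }
    return client.update_table(
        DatabaseName='analytics',
        TableInput=table_input,
        SkipArchive=False,  # keep the default behaviour of archiving the old version
    )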
def update_trigger(Name=None, TriggerUpdate=None):
"""
Updates a trigger definition.
See also: AWS API Documentation
Exceptions
:example: response = client.update_trigger(
Name='string',
TriggerUpdate={
'Name': 'string',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the trigger to update.\n
:type TriggerUpdate: dict
:param TriggerUpdate: [REQUIRED]\nThe new values with which to update the trigger.\n\nName (string) --Reserved for future use.\n\nDescription (string) --A description of this trigger.\n\nSchedule (string) --A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .\n\nActions (list) --The actions initiated by this trigger.\n\n(dict) --Defines an action to be initiated by a trigger.\n\nJobName (string) --The name of a job to be executed.\n\nArguments (dict) --The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.\nYou can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.\nFor information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.\nFor information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.\n\n(string) --\n(string) --\n\n\n\n\nTimeout (integer) --The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.\n\nSecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this action.\n\nNotificationProperty (dict) --Specifies configuration properties of a job run notification.\n\nNotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification.\n\n\n\nCrawlerName (string) --The name of the crawler to be used with this action.\n\n\n\n\n\nPredicate (dict) --The predicate of this trigger, which defines when it will fire.\n\nLogical (string) --An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.\n\nConditions (list) --A list of the conditions that determine when the trigger will fire.\n\n(dict) --Defines a condition under which a trigger fires.\n\nLogicalOperator (string) --A logical operator.\n\nJobName (string) --The name of the job whose JobRuns this condition applies to, and on which this trigger waits.\n\nState (string) --The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .\n\nCrawlerName (string) --The name of the crawler to which this condition applies.\n\nCrawlState (string) --The state of the crawler to which this condition applies.\n\n\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
Response Structure
(dict) --
Trigger (dict) --
The resulting trigger definition.
Name (string) --
The name of the trigger.
WorkflowName (string) --
The name of the workflow associated with the trigger.
Id (string) --
Reserved for future use.
Type (string) --
The type of trigger that this is.
State (string) --
The current state of the trigger.
Description (string) --
A description of this trigger.
Schedule (string) --
A cron expression used to specify the schedule (see Time-Based Schedules for Jobs and Crawlers . For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *) .
Actions (list) --
The actions initiated by this trigger.
(dict) --
Defines an action to be initiated by a trigger.
JobName (string) --
The name of a job to be executed.
Arguments (dict) --
The job arguments used when this trigger fires. For this job run, they replace the default arguments set in the job definition itself.
You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes.
For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide.
For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide.
(string) --
(string) --
Timeout (integer) --
The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
SecurityConfiguration (string) --
The name of the SecurityConfiguration structure to be used with this action.
NotificationProperty (dict) --
Specifies configuration properties of a job run notification.
NotifyDelayAfter (integer) --
After a job run starts, the number of minutes to wait before sending a job run delay notification.
CrawlerName (string) --
The name of the crawler to be used with this action.
Predicate (dict) --
The predicate of this trigger, which defines when it will fire.
Logical (string) --
An optional field if only one condition is listed. If multiple conditions are listed, then this field is required.
Conditions (list) --
A list of the conditions that determine when the trigger will fire.
(dict) --
Defines a condition under which a trigger fires.
LogicalOperator (string) --
A logical operator.
JobName (string) --
The name of the job whose JobRuns this condition applies to, and on which this trigger waits.
State (string) --
The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
CrawlerName (string) --
The name of the crawler to which this condition applies.
CrawlState (string) --
The state of the crawler to which this condition applies.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Trigger': {
'Name': 'string',
'WorkflowName': 'string',
'Id': 'string',
'Type': 'SCHEDULED'|'CONDITIONAL'|'ON_DEMAND',
'State': 'CREATING'|'CREATED'|'ACTIVATING'|'ACTIVATED'|'DEACTIVATING'|'DEACTIVATED'|'DELETING'|'UPDATING',
'Description': 'string',
'Schedule': 'string',
'Actions': [
{
'JobName': 'string',
'Arguments': {
'string': 'string'
},
'Timeout': 123,
'SecurityConfiguration': 'string',
'NotificationProperty': {
'NotifyDelayAfter': 123
},
'CrawlerName': 'string'
},
],
'Predicate': {
'Logical': 'AND'|'ANY',
'Conditions': [
{
'LogicalOperator': 'EQUALS',
'JobName': 'string',
'State': 'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT',
'CrawlerName': 'string',
'CrawlState': 'RUNNING'|'CANCELLING'|'CANCELLED'|'SUCCEEDED'|'FAILED'
},
]
}
}
}
:returns:
(string) --
(string) --
"""
pass
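# Hedged usage sketch (not part of the generated stubs): changing a scheduled
# trigger to fire daily at 12:15 UTC, reusing the cron syntax quoted in the
# docstring. Trigger and job names are hypothetical.
def _example_update_trigger():
    import boto3  # assumed installed and configured
    client = boto3.client('glue')
    return client.update_trigger(
        Name='nightly-trigger',  # hypothetical trigger name
        TriggerUpdate={
            'Schedule': 'cron(15 12 * * ? *)',  # every day at 12:15 UTC
            'Actions': [{'JobName': 'my-etl-job'}],  # hypothetical job
        },
    )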
def update_user_defined_function(CatalogId=None, DatabaseName=None, FunctionName=None, FunctionInput=None):
"""
Updates an existing function definition in the Data Catalog.
See also: AWS API Documentation
Exceptions
:example: response = client.update_user_defined_function(
CatalogId='string',
DatabaseName='string',
FunctionName='string',
FunctionInput={
'FunctionName': 'string',
'ClassName': 'string',
'OwnerName': 'string',
'OwnerType': 'USER'|'ROLE'|'GROUP',
'ResourceUris': [
{
'ResourceType': 'JAR'|'FILE'|'ARCHIVE',
'Uri': 'string'
},
]
}
)
:type CatalogId: string
:param CatalogId: The ID of the Data Catalog where the function to be updated is located. If none is provided, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: [REQUIRED]\nThe name of the catalog database where the function to be updated is located.\n
:type FunctionName: string
:param FunctionName: [REQUIRED]\nThe name of the function.\n
:type FunctionInput: dict
:param FunctionInput: [REQUIRED]\nA FunctionInput object that redefines the function in the Data Catalog.\n\nFunctionName (string) --The name of the function.\n\nClassName (string) --The Java class that contains the function code.\n\nOwnerName (string) --The owner of the function.\n\nOwnerType (string) --The owner type.\n\nResourceUris (list) --The resource URIs for the function.\n\n(dict) --The URIs for function resources.\n\nResourceType (string) --The type of the resource.\n\nUri (string) --The URI for accessing the resource.\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.GlueEncryptionException
:return: {}
:returns:
(dict) --
"""
pass
def update_workflow(Name=None, Description=None, DefaultRunProperties=None):
"""
Updates an existing workflow.
See also: AWS API Documentation
Exceptions
:example: response = client.update_workflow(
Name='string',
Description='string',
DefaultRunProperties={
'string': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]\nName of the workflow to be updated.\n
:type Description: string
:param Description: The description of the workflow.
:type DefaultRunProperties: dict
:param DefaultRunProperties: A collection of properties to be used as part of each execution of the workflow.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Name': 'string'
}
Response Structure
(dict) --
Name (string) --
The name of the workflow which was specified in input.
Exceptions
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
:return: {
'Name': 'string'
}
:returns:
Glue.Client.exceptions.InvalidInputException
Glue.Client.exceptions.EntityNotFoundException
Glue.Client.exceptions.InternalServiceException
Glue.Client.exceptions.OperationTimeoutException
Glue.Client.exceptions.ConcurrentModificationException
"""
pass
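# Hedged usage sketch (not part of the generated stubs): refreshing a
# workflow's description and default run properties. The workflow name and
# property values are hypothetical.
def _example_update_workflow():
    import boto3  # assumed installed and configured
    client = boto3.client('glue')
    return client.update_workflow(
        Name='nightly-pipeline',  # hypothetical workflow name
        Description='Nightly ingest and transform pipeline',
        DefaultRunProperties={'environment': 'prod'},  # arbitrary key/value pairs
    )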
| mit |
shangwuhencc/scikit-learn | examples/linear_model/plot_ransac.py | 249 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
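# A small extra check (a sketch, not part of the original example): compare
# R^2 scores to see how much the outliers hurt the ordinary least-squares fit.
# The RANSAC score below is computed on its detected inliers only.
print("R^2, ordinary fit on all data: %.3f" % model.score(X, y))
print("R^2, RANSAC fit on detected inliers: %.3f"
      % model_ransac.score(X[inlier_mask], y[inlier_mask]))
print("Detected inliers: %d of %d samples" % (inlier_mask.sum(), n_samples))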
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
CredoReference/edx-platform | lms/djangoapps/course_api/blocks/tests/test_serializers.py | 4 | 8709 | """
Tests for Course Blocks serializers
"""
from mock import MagicMock
from lms.djangoapps.course_blocks.api import get_course_block_access_transformers, get_course_blocks
from openedx.core.djangoapps.content.block_structure.transformers import BlockStructureTransformers
from student.roles import CourseStaffRole
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import ToyCourseFactory
from ..serializers import BlockDictSerializer, BlockSerializer
from ..transformers.blocks_api import BlocksAPITransformer
from .helpers import deserialize_usage_key
class TestBlockSerializerBase(SharedModuleStoreTestCase):
"""
Base class for testing BlockSerializer and BlockDictSerializer
"""
shard = 4
@classmethod
def setUpClass(cls):
super(TestBlockSerializerBase, cls).setUpClass()
cls.course = ToyCourseFactory.create()
# Hide the html block
key = cls.course.id.make_usage_key('html', 'secret:toylab')
cls.html_block = cls.store.get_item(key)
cls.html_block.visible_to_staff_only = True
cls.store.update_item(cls.html_block, ModuleStoreEnum.UserID.test)
def setUp(self):
super(TestBlockSerializerBase, self).setUp()
self.user = UserFactory.create()
blocks_api_transformer = BlocksAPITransformer(
block_types_to_count=['video'],
requested_student_view_data=['video'],
)
self.transformers = BlockStructureTransformers(
get_course_block_access_transformers() + [blocks_api_transformer]
)
self.block_structure = get_course_blocks(
self.user,
self.course.location,
self.transformers,
)
self.serializer_context = {
'request': MagicMock(),
'block_structure': self.block_structure,
'requested_fields': ['type'],
}
def assert_basic_block(self, block_key_string, serialized_block):
"""
Verifies the given serialized_block when basic fields are requested.
"""
block_key = deserialize_usage_key(block_key_string, self.course.id)
self.assertEquals(
self.block_structure.get_xblock_field(block_key, 'category'),
serialized_block['type'],
)
self.assertEquals(
set(serialized_block.iterkeys()),
{'id', 'block_id', 'type', 'lms_web_url', 'student_view_url'},
)
def add_additional_requested_fields(self, context=None):
"""
Adds additional fields to the requested_fields context for the serializer.
"""
if context is None:
context = self.serializer_context
context['requested_fields'].extend([
'children',
'display_name',
'graded',
'format',
'block_counts',
'student_view_data',
'student_view_multi_device',
'lti_url',
'visible_to_staff_only',
])
def assert_extended_block(self, serialized_block):
"""
Verifies the given serialized_block when additional fields are requested.
"""
self.assertLessEqual(
{
'id', 'type', 'lms_web_url', 'student_view_url',
'display_name', 'graded',
'student_view_multi_device',
'lti_url',
'visible_to_staff_only',
},
set(serialized_block.iterkeys()),
)
# video blocks should have student_view_data
if serialized_block['type'] == 'video':
self.assertIn('student_view_data', serialized_block)
# html blocks should have student_view_multi_device set to True
if serialized_block['type'] == 'html':
self.assertIn('student_view_multi_device', serialized_block)
self.assertTrue(serialized_block['student_view_multi_device'])
# chapters with video should have block_counts
if serialized_block['type'] == 'chapter':
if serialized_block['display_name'] not in ('poll_test', 'handout_container'):
self.assertIn('block_counts', serialized_block)
else:
self.assertNotIn('block_counts', serialized_block)
def create_staff_context(self):
"""
Create staff user and course blocks accessible by that user
"""
# Create a staff user to be able to test visible_to_staff_only
staff_user = UserFactory.create()
CourseStaffRole(self.course.location.course_key).add_users(staff_user)
block_structure = get_course_blocks(
staff_user,
self.course.location,
self.transformers,
)
return {
'request': MagicMock(),
'block_structure': block_structure,
'requested_fields': ['type'],
}
def assert_staff_fields(self, serialized_block):
"""
Test fields accessed by a staff user
"""
if serialized_block['id'] == unicode(self.html_block.location):
self.assertTrue(serialized_block['visible_to_staff_only'])
else:
self.assertFalse(serialized_block['visible_to_staff_only'])
class TestBlockSerializer(TestBlockSerializerBase):
"""
Tests the BlockSerializer class, which returns a list of blocks.
"""
shard = 4
def create_serializer(self, context=None):
"""
creates a BlockSerializer
"""
if context is None:
context = self.serializer_context
return BlockSerializer(
context['block_structure'], many=True, context=context,
)
def test_basic(self):
serializer = self.create_serializer()
for serialized_block in serializer.data:
self.assert_basic_block(serialized_block['id'], serialized_block)
self.assertEquals(len(serializer.data), 28)
def test_additional_requested_fields(self):
self.add_additional_requested_fields()
serializer = self.create_serializer()
for serialized_block in serializer.data:
self.assert_extended_block(serialized_block)
self.assertEquals(len(serializer.data), 28)
def test_staff_fields(self):
"""
Test fields accessed by a staff user
"""
context = self.create_staff_context()
self.add_additional_requested_fields(context)
serializer = self.create_serializer(context)
for serialized_block in serializer.data:
self.assert_extended_block(serialized_block)
self.assert_staff_fields(serialized_block)
self.assertEquals(len(serializer.data), 29)
class TestBlockDictSerializer(TestBlockSerializerBase):
"""
    Tests the BlockDictSerializer class, which returns a dict of blocks keyed by block_key.
"""
shard = 4
def create_serializer(self, context=None):
"""
creates a BlockDictSerializer
"""
if context is None:
context = self.serializer_context
return BlockDictSerializer(
context['block_structure'], many=False, context=context,
)
def test_basic(self):
serializer = self.create_serializer()
# verify root
self.assertEquals(serializer.data['root'], unicode(self.block_structure.root_block_usage_key))
# verify blocks
for block_key_string, serialized_block in serializer.data['blocks'].iteritems():
self.assertEquals(serialized_block['id'], block_key_string)
self.assert_basic_block(block_key_string, serialized_block)
self.assertEquals(len(serializer.data['blocks']), 28)
def test_additional_requested_fields(self):
self.add_additional_requested_fields()
serializer = self.create_serializer()
for serialized_block in serializer.data['blocks'].itervalues():
self.assert_extended_block(serialized_block)
self.assertEquals(len(serializer.data['blocks']), 28)
def test_staff_fields(self):
"""
Test fields accessed by a staff user
"""
context = self.create_staff_context()
self.add_additional_requested_fields(context)
serializer = self.create_serializer(context)
for serialized_block in serializer.data['blocks'].itervalues():
self.assert_extended_block(serialized_block)
self.assert_staff_fields(serialized_block)
self.assertEquals(len(serializer.data['blocks']), 29)
| agpl-3.0 |
nicproulx/mne-python | tutorials/plot_artifacts_detection.py | 5 | 5377 | """
.. _tut_artifacts_detect:
Introduction to artifacts and artifact detection
================================================
Since MNE supports the data of many different acquisition systems, the
particular artifacts in your data might behave very differently from the
artifacts you can observe in our tutorials and examples.
Therefore you should be aware of the different approaches and of
the variability of artifact rejection (automatic/manual) procedures described
below. In the end, always visually inspect your data
after artifact rejection or correction.
Background: what is an artifact?
--------------------------------
Artifacts are signal interference that can be
endogenous (biological) and exogenous (environmental).
Typical biological artifacts are head movements, eye blinks
or eye movements, heart beats. The most common environmental
artifact is due to the power line, the so-called *line noise*.
How to handle artifacts?
------------------------
MNE deals with artifacts by first identifying them, and subsequently removing
them. Detection of artifacts can be done visually, or using automatic routines
(or a combination of both). After you know what the artifacts are, you need
to remove them. This can be done by:
- *ignoring* the piece of corrupted data
- *fixing* the corrupted data
For artifact detection, the functions MNE provides depend on whether
your data is continuous (Raw) or epoch-based (Epochs) and on
whether your data is stored on disk or already in memory.
Detecting the artifacts without reading the complete data into memory allows
you to work with datasets that are too large to fit in memory all at once.
Detecting the artifacts in continuous data allows you to apply filters
(e.g. a band-pass filter to zoom in on the muscle artifacts on the temporal
channels) without having to worry about edge effects due to the filter
(i.e. filter ringing). Having the data in memory after segmenting/epoching is
however a very efficient way of browsing through the data, which helps
with visualization. To conclude, there is no single optimal way
to detect the artifacts: it just depends on the data properties and your
own preferences.
In this tutorial we show how to detect artifacts visually and automatically.
For how to correct artifacts by rejection see :ref:`tut_artifacts_reject`.
To discover how to correct certain artifacts by filtering see
:ref:`tut_artifacts_filter` and to learn how to correct artifacts
with subspace methods like SSP and ICA see :ref:`tut_artifacts_correct_ssp`
and :ref:`tut_artifacts_correct_ica`.
Artifacts Detection
-------------------
This tutorial discusses a couple of major artifacts that most analyses
have to deal with and demonstrates how to detect them.
"""
import numpy as np
import mne
from mne.datasets import sample
from mne.preprocessing import create_ecg_epochs, create_eog_epochs
# getting some data ready
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
###############################################################################
# Low frequency drifts and line noise
(raw.copy().pick_types(meg='mag')
.del_proj(0)
.plot(duration=60, n_channels=100, remove_dc=False))
###############################################################################
# we see high-amplitude undulations in low frequencies, spanning tens of
# seconds
raw.plot_psd(tmax=np.inf, fmax=250)
###############################################################################
# On MEG sensors we see narrow frequency peaks at 60, 120, 180, 240 Hz,
# related to line noise.
# We also see some high-amplitude signals between 25 and 32 Hz, hinting at other
# biological artifacts such as ECG. These can be most easily detected in the
# time domain using MNE helper functions.
#
# See :ref:`tut_artifacts_filter`.
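# A possible follow-up (a sketch, not from the original tutorial): the slow
# drifts and the 60 Hz line noise seen above can be attenuated with a
# high-pass filter and a notch filter. The cutoff and mains frequency are
# assumptions for this sample dataset; we filter a copy so the rest of the
# tutorial keeps working on the unfiltered data.
raw_filtered = raw.copy().filter(l_freq=1., h_freq=None)
raw_filtered.notch_filter(np.arange(60, 241, 60))
raw_filtered.plot_psd(tmax=np.inf, fmax=250)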
###############################################################################
# ECG
# ---
#
# finds ECG events, creates epochs, averages and plots
average_ecg = create_ecg_epochs(raw).average()
print('We found %i ECG events' % average_ecg.nave)
average_ecg.plot_joint()
###############################################################################
# we can see typical time courses and non-dipolar topographies.
# Note the order of magnitude of the average artifact-related signal and
# compare this to what you observe for brain signals
###############################################################################
# EOG
# ---
average_eog = create_eog_epochs(raw).average()
print('We found %i EOG events' % average_eog.nave)
average_eog.plot_joint()
###############################################################################
# Knowing these artifact patterns is of paramount importance when
# judging about the quality of artifact removal techniques such as SSP or ICA.
# As a rule of thumb you need artifact amplitudes orders of magnitude higher
# than your signal of interest and you need a few of such events in order
# to find decompositions that allow you to estimate and remove patterns related
# to artifacts.
#
# Consider the following tutorials for correcting this class of artifacts:
# - :ref:`tut_artifacts_filter`
# - :ref:`tut_artifacts_correct_ica`
# - :ref:`tut_artifacts_correct_ssp`
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/exercises/plot_iris_exercise.py | 320 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
# Use an integer split index; float slice indices are not valid for NumPy arrays.
n_train = int(.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
NUKnightLab/panda | panda/migrations/0025_add_subscription_permissions.py | 6 | 14192 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""
        This migration will fail if run against a clean database (fresh setup).
        This is fine because the permissions will be installed from the fixture.
"""
try:
group = orm['auth.group'].objects.get(name='panda_user')
perm = orm['auth.permission'].objects.get(codename='add_searchsubscription')
group.permissions.add(perm)
perm = orm['auth.permission'].objects.get(codename='delete_searchsubscription')
group.permissions.add(perm)
except:
pass
def backwards(self, orm):
group = orm['auth.group'].objects.get(name='panda_user')
perm = orm['auth.permission'].objects.get(codename='add_searchsubscription')
group.permissions.remove(perm)
perm = orm['auth.permission'].objects.get(codename='delete_searchsubscription')
group.permissions.remove(perm)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'panda.activitylog': {
'Meta': {'unique_together': "(('user', 'when'),)", 'object_name': 'ActivityLog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_logs'", 'to': "orm['auth.User']"}),
'when': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'panda.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'})
},
'panda.dataset': {
'Meta': {'ordering': "['-creation_date']", 'object_name': 'Dataset'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['panda.Category']"}),
'column_schema': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': "orm['auth.User']"}),
'current_task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['panda.TaskStatus']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_upload': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initial_upload_for'", 'null': 'True', 'to': "orm['panda.DataUpload']"}),
'last_modification': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'row_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sample_data': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'})
},
'panda.dataupload': {
'Meta': {'ordering': "['creation_date']", 'object_name': 'DataUpload'},
'columns': ('panda.fields.JSONField', [], {'null': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data_uploads'", 'null': 'True', 'to': "orm['panda.Dataset']"}),
'dialect': ('panda.fields.JSONField', [], {'null': 'True'}),
'encoding': ('django.db.models.fields.CharField', [], {'default': "'utf-8'", 'max_length': '32'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'guessed_types': ('panda.fields.JSONField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'sample_data': ('panda.fields.JSONField', [], {'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'panda.export': {
'Meta': {'ordering': "['creation_date']", 'object_name': 'Export'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exports'", 'null': 'True', 'to': "orm['panda.Dataset']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'panda.notification': {
'Meta': {'ordering': "['-sent_at']", 'object_name': 'Notification'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'read_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications'", 'to': "orm['auth.User']"}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Info'", 'max_length': '16'}),
'url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True'})
},
'panda.relatedupload': {
'Meta': {'ordering': "['creation_date']", 'object_name': 'RelatedUpload'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_uploads'", 'to': "orm['panda.Dataset']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'panda.searchlog': {
'Meta': {'object_name': 'SearchLog'},
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'searches'", 'null': 'True', 'to': "orm['panda.Dataset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'search_logs'", 'to': "orm['auth.User']"}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'panda.searchsubscription': {
'Meta': {'object_name': 'SearchSubscription'},
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'subscribed_searches'", 'null': 'True', 'to': "orm['panda.Dataset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_run': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'query_human': ('django.db.models.fields.TextField', [], {}),
'query_url': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscribed_searches'", 'to': "orm['auth.User']"})
},
'panda.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'null': 'True', 'to': "orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '50'}),
'task_description': ('django.db.models.fields.TextField', [], {}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'traceback': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'panda.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'activation_key_expiration': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['panda']
symmetrical = True
| mit |
nicproulx/mne-python | tutorials/plot_stats_cluster_methods.py | 5 | 8797 | # doc:slow-example
"""
.. _tut_stats_cluster_methods:
======================================================
Permutation t-test on toy data with spatial clustering
======================================================
Following the illustrative example of Ridgway et al. 2012 [1]_,
this demonstrates some basic ideas behind both the "hat"
variance adjustment method and the threshold-free cluster
enhancement (TFCE) [2]_ method in mne-python.
This toy dataset consists of a 40 x 40 square with a "signal"
present in the center (at pixel [20, 20]); white noise is added
and a 5-pixel-SD normal smoothing kernel is applied.
First, the top row plots the T statistic over space, peaking toward
the center. Note that it has peaky edges. Second, with the "hat"
variance correction/regularization, the peak becomes correctly
centered. Third, the TFCE approach also corrects for these edge
artifacts. Fourth, the two methods combined provide a tighter
estimate, for better or worse.
Now considering multiple-comparisons corrected statistics on these
variables, note that a non-cluster test (e.g., FDR or Bonferroni) would
mis-localize the peak due to sharpness in the T statistic driven by
low-variance pixels toward the edge of the plateau. Standard clustering
(first plot in the second row) identifies the correct region, but the
whole area must be declared significant, so no peak analysis can be done.
Also, the peak is broad. In this method, all significances are
family-wise error rate (FWER) corrected, and the method is
non-parametric so assumptions of Gaussian data distributions (which do
actually hold for this example) don't need to be satisfied. Adding the
"hat" technique tightens the estimate of significant activity (second
plot). The TFCE approach (third plot) allows analyzing each significant
point independently, but still has a broadened estimate. Note that
this is also FWER corrected. Finally, combining the TFCE and "hat"
methods tightens the area declared significant (again FWER corrected),
and allows for evaluation of each point independently instead of as
a single, broad cluster.
.. note:: This example does quite a bit of processing, so even on a
fast machine it can take a few minutes to complete.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
from scipy import stats
from functools import partial
import matplotlib.pyplot as plt
# this changes hidden MPL vars:
from mpl_toolkits.mplot3d import Axes3D # noqa
from mne.stats import (spatio_temporal_cluster_1samp_test,
bonferroni_correction, ttest_1samp_no_p)
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph
print(__doc__)
###############################################################################
# Set parameters
# --------------
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3 # sigma for the "hat" method
threshold = -stats.distributions.t.ppf(0.05, n_subjects - 1)
threshold_tfce = dict(start=0, step=0.2)
n_permutations = 1024 # number of clustering permutations (1024 for exact)
###############################################################################
# Construct simulated data
# ------------------------
#
# Make the connectivity matrix just next-neighbor spatially
n_src = width * width
connectivity = grid_to_graph(width, width)
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(42)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the dead center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
###############################################################################
# Do some statistics
# ------------------
#
# .. note::
# X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions:
X = X.reshape((n_subjects, 1, n_src))
###############################################################################
# Now let's do some clustering using the standard method.
#
# .. note::
# Not specifying a connectivity matrix implies grid-like connectivity,
# which we want here:
T_obs, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
# Let's put the cluster data in a readable format
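# (each cluster is a tuple of index arrays, one per dimension; the time
# dimension is a singleton here, so cl[1] holds the spatial indices)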
ps = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps[cl[1]] = -np.log10(p)
ps = ps.reshape((width, width))
T_obs = T_obs.reshape((width, width))
# To do a Bonferroni correction on these data is simple:
p = stats.distributions.t.sf(T_obs, n_subjects - 1)
p_bon = -np.log10(bonferroni_correction(p)[1])
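# For intuition, here is a minimal sketch of the idea behind the "hat"
# adjustment (a simplified illustration under stated assumptions, not
# necessarily the exact ttest_1samp_no_p implementation): each location's
# variance estimate is inflated by a small fraction (sigma) of the maximum
# variance across locations, so near-zero-variance locations cannot produce
# arbitrarily large t values.
def hat_t_sketch(data_2d, sigma_hat):
    """Illustrative "hat"-regularized one-sample t statistic (observations x locations)."""
    n = data_2d.shape[0]
    var = np.var(data_2d, axis=0, ddof=1)  # per-location sample variance
    var_hat = var + sigma_hat * var.max()  # regularize with a fraction of the max variance
    return data_2d.mean(axis=0) / np.sqrt(var_hat / n)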
# Now let's do some clustering using the standard method with "hat":
stat_fun = partial(ttest_1samp_no_p, sigma=sigma)
T_obs_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun, buffer_size=None)
# Let's put the cluster data in a readable format
ps_hat = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps_hat[cl[1]] = -np.log10(p)
ps_hat = ps_hat.reshape((width, width))
T_obs_hat = T_obs_hat.reshape((width, width))
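# For intuition, a rough 1-D sketch of what TFCE computes (an illustration of
# the Smith & Nichols 2009 formula, not the mne-python implementation): every
# point accumulates cluster extent ** E times threshold ** H over all
# thresholds below its own value, with the usual defaults E = 0.5 and H = 2.
def tfce_1d_sketch(stat, dh=0.1, e_power=0.5, h_power=2.0):
    """Illustrative TFCE enhancement of a 1-D statistic map."""
    heights = np.arange(dh, stat.max() + dh, dh)
    enhanced = np.zeros_like(stat, dtype=float)
    for h in heights:
        above = stat >= h
        # label contiguous runs of supra-threshold points
        starts = np.diff(np.concatenate(([0], above.astype(int)))) == 1
        labels = np.cumsum(starts) * above
        for lab in np.unique(labels[labels > 0]):
            extent = np.sum(labels == lab)
            enhanced[labels == lab] += extent ** e_power * h ** h_power * dh
    return enhanced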
# Now the threshold-free cluster enhancement method (TFCE):
T_obs_tfce, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
T_obs_tfce = T_obs_tfce.reshape((width, width))
ps_tfce = -np.log10(p_values.reshape((width, width)))
# Now the TFCE with "hat" variance correction:
T_obs_tfce_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=1, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun, buffer_size=None)
T_obs_tfce_hat = T_obs_tfce_hat.reshape((width, width))
ps_tfce_hat = -np.log10(p_values.reshape((width, width)))
###############################################################################
# Visualize results
# -----------------
fig = plt.figure(facecolor='w')
x, y = np.mgrid[0:width, 0:width]
kwargs = dict(rstride=1, cstride=1, linewidth=0, cmap='Greens')
Ts = [T_obs, T_obs_hat, T_obs_tfce, T_obs_tfce_hat]
titles = ['T statistic', 'T with "hat"', 'TFCE statistic', 'TFCE w/"hat" stat']
for ii, (t, title) in enumerate(zip(Ts, titles)):
ax = fig.add_subplot(2, 4, ii + 1, projection='3d')
ax.plot_surface(x, y, t, **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
p_lims = [1.3, -np.log10(1.0 / n_permutations)]
pvals = [ps, ps_hat, ps_tfce, ps_tfce_hat]
titles = ['Standard clustering', 'Clust. w/"hat"',
'Clust. w/TFCE', 'Clust. w/TFCE+"hat"']
axs = []
for ii, (p, title) in enumerate(zip(pvals, titles)):
ax = fig.add_subplot(2, 4, 5 + ii)
plt.imshow(p, cmap='Purples', vmin=p_lims[0], vmax=p_lims[1])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
axs.append(ax)
plt.tight_layout()
for ax in axs:
cbar = plt.colorbar(ax=ax, shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025)
cbar.set_label('-log10(p)')
cbar.set_ticks(p_lims)
cbar.set_ticklabels(['%0.1f' % p for p in p_lims])
plt.show()
###############################################################################
# References
# ----------
# .. [1] Ridgway et al. 2012, "The problem of low variance voxels in
# statistical parametric mapping; a new hat avoids a 'haircut'",
# NeuroImage. 2012 Feb 1;59(3):2131-41.
#
# .. [2] Smith and Nichols 2009, "Threshold-free cluster enhancement:
# addressing problems of smoothing, threshold dependence, and
# localisation in cluster inference", NeuroImage 44 (2009) 83-98.
| bsd-3-clause |
kastnerkyle/pylearn2 | pylearn2/tests/test_theano.py | 45 | 4805 | """ Include tests related to Theano.
1) One test of one thing Pylearn2 depends on Theano to do.
2) One test for a rare corner-case crash in Theano that we were not
able to reproduce quickly enough without having this test depend on
Pylearn2.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
import theano
from theano import tensor as T
import pylearn2
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_gpu
def test_grad():
"""Tests that the theano grad method returns a list if it is passed a list
and a single variable if it is passed a single variable.
pylearn2 depends on theano behaving this way, but theano developers have
repeatedly changed it. """
X = T.matrix()
y = X.sum()
G = T.grad(y, [X])
assert isinstance(G, list)
G = T.grad(y, X)
assert not isinstance(G, list)
def test_biglayer():
"""Test a crash during Theano compilation. It would be too long to
redo this test without depending on Pylearn2. So we put it
here.
"""
skip_if_no_gpu()
yaml_string = """
!obj:pylearn2.train.Train {
dataset: &train
!obj:pylearn2.testing.datasets.random_one_hot_topological_dense_design_matrix {
rng: !obj:numpy.random.RandomState { seed: [2014, 6, 6] },
shape: &input_shape [%(xsize)i, %(ysize)i],
channels: 4,
axes: ['c', 0, 1, 'b'],
num_examples: 128,
num_classes: 10
},
model: !obj:pylearn2.models.mlp.MLP {
batch_size: 128,
layers: [
!obj:pylearn2.models.mlp.FlattenerLayer {
raw_layer: !obj:pylearn2.models.mlp.CompositeLayer {
layer_name: 'h0',
layers: [
!obj:pylearn2.models.mlp.MLP {
layer_name: 'h1',
layers: [
!obj:pylearn2.models.maxout.MaxoutConvC01B {
layer_name: 'conv00',
tied_b: 1,
W_lr_scale: .05,
b_lr_scale: .05,
num_channels: 16,
num_pieces: 1,
kernel_shape: [1, 1],
pool_shape: [4, 4],
pool_stride: [4, 4],
irange: .005,
max_kernel_norm: 0.9,
}
]},
!obj:pylearn2.models.maxout.Maxout {
layer_name: 'max0',
W_lr_scale: .1,
b_lr_scale: .1,
num_units: 16,
irange: .005,
max_col_norm: 1.9365,
num_pieces: 1,
}
]
}
},
!obj:pylearn2.models.mlp.Softmax {
max_col_norm: 1.9365,
layer_name: 'y',
n_classes: 10,
irange: .005
}
],
input_space: !obj:pylearn2.space.Conv2DSpace {
shape: *input_shape,
num_channels: 4,
axes: ['c', 0, 1, 'b'],
},
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
learning_rate: .05,
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Momentum {
init_momentum: 0.5,
},
monitoring_dataset:
{
'train': *train
},
termination_criterion: !obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 3
},
},
extensions: [
!obj:pylearn2.training_algorithms.learning_rule.MomentumAdjustor {
start: 1,
saturate: 250,
final_momentum: .7
}
]
}
"""
try:
orig_floatX = theano.config.floatX
theano.config.floatX = 'float32'
theano.sandbox.cuda.use('gpu')
x_size, y_size = 4, 4
parameters = {'xsize': x_size, 'ysize': y_size}
test = yaml_parse.load(yaml_string % parameters)
test.main_loop()
finally:
theano.config.floatX = orig_floatX
theano.sandbox.cuda.unuse()
| bsd-3-clause |
gacarrillor/QGIS | tests/src/python/featuresourcetestbase.py | 8 | 57101 | # -*- coding: utf-8 -*-
"""QGIS Unit test utils for QgsFeatureSource subclasses.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import str
from builtins import object
__author__ = 'Nyall Dawson'
__date__ = '2017-05-25'
__copyright__ = 'Copyright 2017, The QGIS Project'
from qgis.core import (
QgsRectangle,
QgsFeatureRequest,
QgsFeature,
QgsWkbTypes,
QgsProject,
QgsGeometry,
QgsAbstractFeatureIterator,
QgsExpressionContextScope,
QgsExpressionContext,
QgsVectorLayerFeatureSource,
QgsCoordinateReferenceSystem,
NULL
)
from qgis.PyQt.QtCore import QDate, QTime, QDateTime
from utilities import compareWkt
class FeatureSourceTestCase(object):
"""
This is a collection of tests for QgsFeatureSource subclasses, kept generic.
To make use of it, subclass it and set self.source to a QgsFeatureSource you want to test.
Make sure that your source uses the default dataset by converting one of the provided datasets from the folder
tests/testdata/source to a dataset your source is able to handle.
"""
def treat_date_as_datetime(self):
return False
def treat_datetime_as_string(self):
return False
def treat_date_as_string(self):
return False
def treat_time_as_string(self):
return False
def testCrs(self):
self.assertEqual(self.source.sourceCrs().authid(), 'EPSG:4326')
def testWkbType(self):
self.assertEqual(self.source.wkbType(), QgsWkbTypes.Point)
def testFeatureCount(self):
self.assertEqual(self.source.featureCount(), 5)
self.assertEqual(len(self.source), 5)
def testFields(self):
fields = self.source.fields()
for f in ('pk', 'cnt', 'name', 'name2', 'num_char'):
self.assertTrue(fields.lookupField(f) >= 0)
def testGetFeatures(self, source=None, extra_features=[], skip_features=[], changed_attributes={},
changed_geometries={}):
""" Test that expected results are returned when fetching all features """
# IMPORTANT - we do not use `for f in source.getFeatures()` as we are also
# testing that existing attributes & geometry in f are overwritten correctly
# (for f in ... uses a new QgsFeature for every iteration)
if not source:
source = self.source
it = source.getFeatures()
f = QgsFeature()
attributes = {}
geometries = {}
while it.nextFeature(f):
# expect feature to be valid
self.assertTrue(f.isValid())
# some source test datasets will include additional attributes which we ignore,
# so cherry pick desired attributes
attrs = [f['pk'], f['cnt'], f['name'], f['name2'], f['num_char'], f['dt'], f['date'], f['time']]
# force the num_char attribute to be text - some sources (e.g., delimited text) will
# automatically detect that this attribute contains numbers and set it as a numeric
# field
attrs[4] = str(attrs[4])
attributes[f['pk']] = attrs
geometries[f['pk']] = f.hasGeometry() and f.geometry().asWkt()
expected_attributes = {5: [5, -200, NULL, 'NuLl', '5', QDateTime(QDate(2020, 5, 4), QTime(12, 13, 14)) if not self.treat_datetime_as_string() else '2020-05-04 12:13:14', QDate(2020, 5, 2) if not self.treat_date_as_datetime() and not self.treat_date_as_string() else QDateTime(2020, 5, 2, 0, 0, 0) if not self.treat_date_as_string() else '2020-05-02', QTime(12, 13, 1) if not self.treat_time_as_string() else '12:13:01'],
3: [3, 300, 'Pear', 'PEaR', '3', NULL, NULL, NULL],
1: [1, 100, 'Orange', 'oranGe', '1', QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)) if not self.treat_datetime_as_string() else '2020-05-03 12:13:14', QDate(2020, 5, 3) if not self.treat_date_as_datetime() and not self.treat_date_as_string() else QDateTime(2020, 5, 3, 0, 0, 0) if not self.treat_date_as_string() else '2020-05-03', QTime(12, 13, 14) if not self.treat_time_as_string() else '12:13:14'],
2: [2, 200, 'Apple', 'Apple', '2', QDateTime(QDate(2020, 5, 4), QTime(12, 14, 14)) if not self.treat_datetime_as_string() else '2020-05-04 12:14:14', QDate(2020, 5, 4) if not self.treat_date_as_datetime() and not self.treat_date_as_string() else QDateTime(2020, 5, 4, 0, 0, 0) if not self.treat_date_as_string() else '2020-05-04', QTime(12, 14, 14) if not self.treat_time_as_string() else '12:14:14'],
4: [4, 400, 'Honey', 'Honey', '4', QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)) if not self.treat_datetime_as_string() else '2021-05-04 13:13:14', QDate(2021, 5, 4) if not self.treat_date_as_datetime() and not self.treat_date_as_string() else QDateTime(2021, 5, 4, 0, 0, 0) if not self.treat_date_as_string() else '2021-05-04', QTime(13, 13, 14) if not self.treat_time_as_string() else '13:13:14']}
expected_geometries = {1: 'Point (-70.332 66.33)',
2: 'Point (-68.2 70.8)',
3: None,
4: 'Point(-65.32 78.3)',
5: 'Point(-71.123 78.23)'}
for f in extra_features:
expected_attributes[f[0]] = f.attributes()
if f.hasGeometry():
expected_geometries[f[0]] = f.geometry().asWkt()
else:
expected_geometries[f[0]] = None
for i in skip_features:
del expected_attributes[i]
del expected_geometries[i]
for i, a in changed_attributes.items():
for attr_idx, v in a.items():
expected_attributes[i][attr_idx] = v
for i, g, in changed_geometries.items():
if g:
expected_geometries[i] = g.asWkt()
else:
expected_geometries[i] = None
self.assertEqual(attributes, expected_attributes, 'Expected {}, got {}'.format(expected_attributes, attributes))
self.assertEqual(len(expected_geometries), len(geometries))
for pk, geom in list(expected_geometries.items()):
if geom:
assert compareWkt(geom, geometries[pk]), "Geometry {} mismatch Expected:\n{}\nGot:\n{}\n".format(pk,
geom,
geometries[
pk])
else:
self.assertFalse(geometries[pk], 'Expected null geometry for {}'.format(pk))
def assert_query(self, source, expression, expected):
request = QgsFeatureRequest().setFilterExpression(expression).setFlags(QgsFeatureRequest.NoGeometry | QgsFeatureRequest.IgnoreStaticNodesDuringExpressionCompilation)
result = set([f['pk'] for f in source.getFeatures(request)])
assert set(expected) == result, 'Expected {} and got {} when testing expression "{}"'.format(set(expected),
result, expression)
self.assertTrue(all(f.isValid() for f in source.getFeatures(request)))
# Also check that filter works when referenced fields are not being retrieved by request
result = set([f['pk'] for f in source.getFeatures(
QgsFeatureRequest().setFilterExpression(expression).setSubsetOfAttributes(['pk'], self.source.fields()).setFlags(QgsFeatureRequest.IgnoreStaticNodesDuringExpressionCompilation))])
assert set(
expected) == result, 'Expected {} and got {} when testing expression "{}" using empty attribute subset'.format(
set(expected), result, expression)
# test that results match QgsFeatureRequest.acceptFeature
request = QgsFeatureRequest().setFilterExpression(expression).setFlags(QgsFeatureRequest.IgnoreStaticNodesDuringExpressionCompilation)
for f in source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in expected)
def runGetFeatureTests(self, source):
self.assertEqual(len([f for f in source.getFeatures()]), 5)
self.assert_query(source, 'name ILIKE \'QGIS\'', [])
self.assert_query(source, '"name" IS NULL', [5])
self.assert_query(source, '"name" IS NOT NULL', [1, 2, 3, 4])
self.assert_query(source, '"name" NOT LIKE \'Ap%\'', [1, 3, 4])
self.assert_query(source, '"name" NOT ILIKE \'QGIS\'', [1, 2, 3, 4])
self.assert_query(source, '"name" NOT ILIKE \'pEAR\'', [1, 2, 4])
self.assert_query(source, 'name = \'Apple\'', [2])
# field names themselves are NOT case sensitive -- QGIS expressions don't care about this
self.assert_query(source, '\"NaMe\" = \'Apple\'', [2])
self.assert_query(source, 'name <> \'Apple\'', [1, 3, 4])
self.assert_query(source, 'name = \'apple\'', [])
self.assert_query(source, '"name" <> \'apple\'', [1, 2, 3, 4])
self.assert_query(source, '(name = \'Apple\') is not null', [1, 2, 3, 4])
self.assert_query(source, 'name LIKE \'Apple\'', [2])
self.assert_query(source, 'name LIKE \'aPple\'', [])
self.assert_query(source, 'name LIKE \'Ap_le\'', [2])
self.assert_query(source, 'name LIKE \'Ap\\_le\'', [])
self.assert_query(source, 'name ILIKE \'aPple\'', [2])
self.assert_query(source, 'name ILIKE \'%pp%\'', [2])
self.assert_query(source, 'cnt > 0', [1, 2, 3, 4])
self.assert_query(source, '-cnt > 0', [5])
self.assert_query(source, 'cnt < 0', [5])
self.assert_query(source, '-cnt < 0', [1, 2, 3, 4])
self.assert_query(source, 'cnt >= 100', [1, 2, 3, 4])
self.assert_query(source, 'cnt <= 100', [1, 5])
self.assert_query(source, 'pk IN (1, 2, 4, 8)', [1, 2, 4])
self.assert_query(source, 'cnt = 50 * 2', [1])
self.assert_query(source, 'cnt = 150 / 1.5', [1])
self.assert_query(source, 'cnt = 1000 / 10', [1])
self.assert_query(source, 'cnt = 1000/11+10', []) # checks that source isn't rounding int/int
self.assert_query(source, 'pk = 9 // 4', [2]) # int division
self.assert_query(source, 'cnt = 99 + 1', [1])
self.assert_query(source, 'cnt = 101 - 1', [1])
self.assert_query(source, 'cnt - 1 = 99', [1])
self.assert_query(source, '-cnt - 1 = -101', [1])
self.assert_query(source, '-(-cnt) = 100', [1])
self.assert_query(source, '-(cnt) = -(100)', [1])
self.assert_query(source, 'cnt + 1 = 101', [1])
self.assert_query(source, 'cnt = 1100 % 1000', [1])
self.assert_query(source, '"name" || \' \' || "name" = \'Orange Orange\'', [1])
self.assert_query(source, '"name" || \' \' || "cnt" = \'Orange 100\'', [1])
self.assert_query(source, '\'x\' || "name" IS NOT NULL', [1, 2, 3, 4])
self.assert_query(source, '\'x\' || "name" IS NULL', [5])
self.assert_query(source, 'cnt = 10 ^ 2', [1])
self.assert_query(source, '"name" ~ \'[OP]ra[gne]+\'', [1])
self.assert_query(source, '"name"="name2"', [2, 4]) # mix of matched and non-matched case sensitive names
self.assert_query(source, 'true', [1, 2, 3, 4, 5])
self.assert_query(source, 'false', [])
# Three value logic
self.assert_query(source, 'false and false', [])
self.assert_query(source, 'false and true', [])
self.assert_query(source, 'false and NULL', [])
self.assert_query(source, 'true and false', [])
self.assert_query(source, 'true and true', [1, 2, 3, 4, 5])
self.assert_query(source, 'true and NULL', [])
self.assert_query(source, 'NULL and false', [])
self.assert_query(source, 'NULL and true', [])
self.assert_query(source, 'NULL and NULL', [])
self.assert_query(source, 'false or false', [])
self.assert_query(source, 'false or true', [1, 2, 3, 4, 5])
self.assert_query(source, 'false or NULL', [])
self.assert_query(source, 'true or false', [1, 2, 3, 4, 5])
self.assert_query(source, 'true or true', [1, 2, 3, 4, 5])
self.assert_query(source, 'true or NULL', [1, 2, 3, 4, 5])
self.assert_query(source, 'NULL or false', [])
self.assert_query(source, 'NULL or true', [1, 2, 3, 4, 5])
self.assert_query(source, 'NULL or NULL', [])
self.assert_query(source, 'not true', [])
self.assert_query(source, 'not false', [1, 2, 3, 4, 5])
self.assert_query(source, 'not null', [])
# not
self.assert_query(source, 'not name = \'Apple\'', [1, 3, 4])
self.assert_query(source, 'not name IS NULL', [1, 2, 3, 4])
self.assert_query(source, 'not name = \'Apple\' or name = \'Apple\'', [1, 2, 3, 4])
self.assert_query(source, 'not name = \'Apple\' or not name = \'Apple\'', [1, 3, 4])
self.assert_query(source, 'not name = \'Apple\' and pk = 4', [4])
self.assert_query(source, 'not name = \'Apple\' and not pk = 4', [1, 3])
self.assert_query(source, 'not pk IN (1, 2, 4, 8)', [3, 5])
# type conversion - QGIS expressions do not mind that we are comparing a string
# against numeric literals
self.assert_query(source, 'num_char IN (2, 4, 5)', [2, 4, 5])
# function
self.assert_query(source, 'sqrt(pk) >= 2', [4, 5])
self.assert_query(source, 'radians(cnt) < 2', [1, 5])
self.assert_query(source, 'degrees(pk) <= 200', [1, 2, 3])
self.assert_query(source, 'abs(cnt) <= 200', [1, 2, 5])
self.assert_query(source, 'cos(pk) < 0', [2, 3, 4])
self.assert_query(source, 'sin(pk) < 0', [4, 5])
self.assert_query(source, 'tan(pk) < 0', [2, 3, 5])
self.assert_query(source, 'acos(-1) < pk', [4, 5])
self.assert_query(source, 'asin(1) < pk', [2, 3, 4, 5])
self.assert_query(source, 'atan(3.14) < pk', [2, 3, 4, 5])
self.assert_query(source, 'atan2(3.14, pk) < 1', [3, 4, 5])
self.assert_query(source, 'exp(pk) < 10', [1, 2])
self.assert_query(source, 'ln(pk) <= 1', [1, 2])
self.assert_query(source, 'log(3, pk) <= 1', [1, 2, 3])
self.assert_query(source, 'log10(pk) < 0.5', [1, 2, 3])
self.assert_query(source, 'round(3.14) <= pk', [3, 4, 5])
self.assert_query(source, 'round(0.314,1) * 10 = pk', [3])
self.assert_query(source, 'floor(3.14) <= pk', [3, 4, 5])
self.assert_query(source, 'ceil(3.14) <= pk', [4, 5])
self.assert_query(source, 'pk < pi()', [1, 2, 3])
self.assert_query(source, 'round(cnt / 66.67) <= 2', [1, 5])
self.assert_query(source, 'floor(cnt / 66.67) <= 2', [1, 2, 5])
self.assert_query(source, 'ceil(cnt / 66.67) <= 2', [1, 5])
self.assert_query(source, 'pk < pi() / 2', [1])
self.assert_query(source, 'pk = char(51)', [3])
self.assert_query(source, 'pk = coalesce(NULL,3,4)', [3])
self.assert_query(source, 'lower(name) = \'apple\'', [2])
self.assert_query(source, 'upper(name) = \'APPLE\'', [2])
self.assert_query(source, 'name = trim(\' Apple \')', [2])
# geometry
# azimuth and touches tests are deactivated because they do not pass for WFS source
# self.assert_query(source, 'azimuth($geometry,geom_from_wkt( \'Point (-70 70)\')) < pi()', [1, 5])
self.assert_query(source, 'x($geometry) < -70', [1, 5])
self.assert_query(source, 'y($geometry) > 70', [2, 4, 5])
self.assert_query(source, 'xmin($geometry) < -70', [1, 5])
self.assert_query(source, 'ymin($geometry) > 70', [2, 4, 5])
self.assert_query(source, 'xmax($geometry) < -70', [1, 5])
self.assert_query(source, 'ymax($geometry) > 70', [2, 4, 5])
self.assert_query(source,
'disjoint($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))',
[4, 5])
self.assert_query(source,
'intersects($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))',
[1, 2])
# self.assert_query(source, 'touches($geometry,geom_from_wkt( \'Polygon ((-70.332 66.33, -65.32 66.33, -65.32 78.3, -70.332 78.3, -70.332 66.33))\'))', [1, 4])
self.assert_query(source,
'contains(geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'),$geometry)',
[1, 2])
self.assert_query(source, 'distance($geometry,geom_from_wkt( \'Point (-70 70)\')) > 7', [4, 5])
self.assert_query(source,
'intersects($geometry,geom_from_gml( \'<gml:Polygon srsName="EPSG:4326"><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>-72.2,66.1 -65.2,66.1 -65.2,72.0 -72.2,72.0 -72.2,66.1</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs></gml:Polygon>\'))',
[1, 2])
# between/not between
self.assert_query(source, 'cnt BETWEEN -200 AND 200', [1, 2, 5])
self.assert_query(source, 'cnt NOT BETWEEN 100 AND 200', [3, 4, 5])
if self.treat_datetime_as_string():
self.assert_query(source, """dt BETWEEN format_date(make_datetime(2020, 5, 3, 12, 13, 14), 'yyyy-MM-dd hh:mm:ss') AND format_date(make_datetime(2020, 5, 4, 12, 14, 14), 'yyyy-MM-dd hh:mm:ss')""", [1, 2, 5])
self.assert_query(source, """dt NOT BETWEEN format_date(make_datetime(2020, 5, 3, 12, 13, 14), 'yyyy-MM-dd hh:mm:ss') AND format_date(make_datetime(2020, 5, 4, 12, 14, 14), 'yyyy-MM-dd hh:mm:ss')""", [4])
else:
self.assert_query(source, 'dt BETWEEN make_datetime(2020, 5, 3, 12, 13, 14) AND make_datetime(2020, 5, 4, 12, 14, 14)', [1, 2, 5])
self.assert_query(source, 'dt NOT BETWEEN make_datetime(2020, 5, 3, 12, 13, 14) AND make_datetime(2020, 5, 4, 12, 14, 14)', [4])
# datetime
if self.treat_datetime_as_string():
self.assert_query(source, '"dt" <= format_date(make_datetime(2020, 5, 4, 12, 13, 14), \'yyyy-MM-dd hh:mm:ss\')', [1, 5])
self.assert_query(source, '"dt" < format_date(make_date(2020, 5, 4), \'yyyy-MM-dd hh:mm:ss\')', [1])
self.assert_query(source, '"dt" = format_date(to_datetime(\'000www14ww13ww12www4ww5ww2020\',\'zzzwwwsswwmmwwhhwwwdwwMwwyyyy\'),\'yyyy-MM-dd hh:mm:ss\')', [5])
else:
self.assert_query(source, '"dt" <= make_datetime(2020, 5, 4, 12, 13, 14)', [1, 5])
self.assert_query(source, '"dt" < make_date(2020, 5, 4)', [1])
self.assert_query(source, '"dt" = to_datetime(\'000www14ww13ww12www4ww5ww2020\',\'zzzwwwsswwmmwwhhwwwdwwMwwyyyy\')', [5])
self.assert_query(source, '"date" <= make_datetime(2020, 5, 4, 12, 13, 14)', [1, 2, 5])
self.assert_query(source, '"date" >= make_date(2020, 5, 4)', [2, 4])
if not self.treat_date_as_datetime():
self.assert_query(source,
'"date" = to_date(\'www4ww5ww2020\',\'wwwdwwMwwyyyy\')',
[2])
else:
# TODO - we don't have any expression functions which can upgrade a date value to a datetime value!
pass
if not self.treat_time_as_string():
self.assert_query(source, '"time" >= make_time(12, 14, 14)', [2, 4])
self.assert_query(source, '"time" = to_time(\'000www14ww13ww12www\',\'zzzwwwsswwmmwwhhwww\')', [1])
else:
self.assert_query(source, 'to_time("time") >= make_time(12, 14, 14)', [2, 4])
self.assert_query(source, 'to_time("time") = to_time(\'000www14ww13ww12www\',\'zzzwwwsswwmmwwhhwww\')', [1])
# TODO - enable, but needs fixing on Travis due to timezone handling issues
# if self.treat_datetime_as_string():
# self.assert_query(source, 'to_datetime("dt", \'yyyy-MM-dd hh:mm:ss\') + make_interval(days:=1) <= make_datetime(2020, 5, 4, 12, 13, 14)', [1])
# self.assert_query(source, 'to_datetime("dt", \'yyyy-MM-dd hh:mm:ss\') + make_interval(days:=0.01) <= make_datetime(2020, 5, 4, 12, 13, 14)', [1, 5])
# else:
# self.assert_query(source, '"dt" + make_interval(days:=1) <= make_datetime(2020, 5, 4, 12, 13, 14)', [1])
# self.assert_query(source, '"dt" + make_interval(days:=0.01) <= make_datetime(2020, 5, 4, 12, 13, 14)', [1, 5])
# combination of an uncompilable expression and limit
# TODO - move this test to FeatureSourceTestCase
# it's currently added in ProviderTestCase, but tests only using a QgsVectorLayer getting features,
# i.e. not directly requesting features from the provider. Turns out the WFS provider fails this
# and should be fixed - then we can enable this test at the FeatureSourceTestCase level
# feature = next(self.source.getFeatures(QgsFeatureRequest().setFilterExpression('pk=4')))
# context = QgsExpressionContext()
# scope = QgsExpressionContextScope()
# scope.setVariable('parent', feature)
# context.appendScope(scope)
# request = QgsFeatureRequest()
# request.setExpressionContext(context)
# request.setFilterExpression('"pk" = attribute(@parent, \'pk\')')
# request.setLimit(1)
# values = [f['pk'] for f in self.source.getFeatures(request)]
# self.assertEqual(values, [4])
def testGetFeaturesExp(self):
self.runGetFeatureTests(self.source)
def runOrderByTests(self):
request = QgsFeatureRequest().addOrderBy('cnt')
values = [f['cnt'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [-200, 100, 200, 300, 400])
request = QgsFeatureRequest().addOrderBy('cnt', False)
values = [f['cnt'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [400, 300, 200, 100, -200])
request = QgsFeatureRequest().addOrderBy('name')
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, ['Apple', 'Honey', 'Orange', 'Pear', NULL])
request = QgsFeatureRequest().addOrderBy('name', True, True)
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [NULL, 'Apple', 'Honey', 'Orange', 'Pear'])
request = QgsFeatureRequest().addOrderBy('name', False)
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [NULL, 'Pear', 'Orange', 'Honey', 'Apple'])
request = QgsFeatureRequest().addOrderBy('name', False, False)
values = [f['name'] for f in self.source.getFeatures(request)]
self.assertEqual(values, ['Pear', 'Orange', 'Honey', 'Apple', NULL])
request = QgsFeatureRequest().addOrderBy('num_char', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
request = QgsFeatureRequest().addOrderBy('dt', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [3, 4, 2, 5, 1])
request = QgsFeatureRequest().addOrderBy('date', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [3, 4, 2, 1, 5])
request = QgsFeatureRequest().addOrderBy('time', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [3, 4, 2, 1, 5])
# Case sensitivity
request = QgsFeatureRequest().addOrderBy('name2')
values = [f['name2'] for f in self.source.getFeatures(request)]
self.assertEqual(values, ['Apple', 'Honey', 'NuLl', 'oranGe', 'PEaR'])
# Combination with LIMIT
request = QgsFeatureRequest().addOrderBy('pk', False).setLimit(2)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4])
# A slightly more complex expression
request = QgsFeatureRequest().addOrderBy('pk*2', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
# Order reversing expression
request = QgsFeatureRequest().addOrderBy('pk*-1', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [1, 2, 3, 4, 5])
# Type dependent expression
request = QgsFeatureRequest().addOrderBy('num_char*2', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
# Order by guaranteed to fail
request = QgsFeatureRequest().addOrderBy('not a valid expression*', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(set(values), set([5, 4, 3, 2, 1]))
# Multiple order bys and boolean
request = QgsFeatureRequest().addOrderBy('pk > 2').addOrderBy('pk', False)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [2, 1, 5, 4, 3])
# Multiple order bys, one bad, and a limit
request = QgsFeatureRequest().addOrderBy('pk', False).addOrderBy('not a valid expression*', False).setLimit(2)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4])
# Bad expression first
request = QgsFeatureRequest().addOrderBy('not a valid expression*', False).addOrderBy('pk', False).setLimit(2)
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4])
# Combination with subset of attributes
request = QgsFeatureRequest().addOrderBy('num_char', False).setSubsetOfAttributes(['pk'], self.source.fields())
values = [f['pk'] for f in self.source.getFeatures(request)]
self.assertEqual(values, [5, 4, 3, 2, 1])
def testOrderBy(self):
self.runOrderByTests()
def testOpenIteratorAfterSourceRemoval(self):
"""
Test that removing the source after opening an iterator does not crash. All required
information should be captured in the iterator's source, and there MUST be no
links between the iterator and the source's data source.
"""
if not getattr(self, 'getSource', None):
return
source = self.getSource()
it = source.getFeatures()
del source
# get the features
pks = []
for f in it:
pks.append(f['pk'])
self.assertEqual(set(pks), {1, 2, 3, 4, 5})
def testGetFeaturesFidTests(self):
fids = [f.id() for f in self.source.getFeatures()]
assert len(fids) == 5, 'Expected 5 features, got {} instead'.format(len(fids))
for id in fids:
features = [f for f in self.source.getFeatures(QgsFeatureRequest().setFilterFid(id))]
self.assertEqual(len(features), 1)
feature = features[0]
self.assertTrue(feature.isValid())
result = [feature.id()]
expected = [id]
assert result == expected, 'Expected {} and got {} when testing for feature ID filter'.format(expected,
result)
# test that results match QgsFeatureRequest.acceptFeature
request = QgsFeatureRequest().setFilterFid(id)
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f.id() == id)
# bad features
it = self.source.getFeatures(QgsFeatureRequest().setFilterFid(-99999999))
feature = QgsFeature(5)
feature.setValid(False)
self.assertFalse(it.nextFeature(feature))
self.assertFalse(feature.isValid())
def testGetFeaturesFidsTests(self):
fids = [f.id() for f in self.source.getFeatures()]
self.assertEqual(len(fids), 5)
# empty list = no features
request = QgsFeatureRequest().setFilterFids([])
result = set([f.id() for f in self.source.getFeatures(request)])
self.assertFalse(result)
request = QgsFeatureRequest().setFilterFids([fids[0], fids[2]])
result = set([f.id() for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = set([fids[0], fids[2]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f.id() in expected)
result = set(
[f.id() for f in self.source.getFeatures(QgsFeatureRequest().setFilterFids([fids[1], fids[3], fids[4]]))])
expected = set([fids[1], fids[3], fids[4]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
# sources should ignore non-existent fids
result = set([f.id() for f in self.source.getFeatures(
QgsFeatureRequest().setFilterFids([-101, fids[1], -102, fids[3], -103, fids[4], -104]))])
expected = set([fids[1], fids[3], fids[4]])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
result = set([f.id() for f in self.source.getFeatures(QgsFeatureRequest().setFilterFids([]))])
expected = set([])
assert result == expected, 'Expected {} and got {} when testing for feature IDs filter'.format(expected, result)
# Rewind mid-way
request = QgsFeatureRequest().setFilterFids([fids[1], fids[3], fids[4]])
feature_it = self.source.getFeatures(request)
feature = QgsFeature()
feature.setValid(True)
self.assertTrue(feature_it.nextFeature(feature))
self.assertIn(feature.id(), [fids[1], fids[3], fids[4]])
first_feature = feature
self.assertTrue(feature.isValid())
# rewind
self.assertTrue(feature_it.rewind())
self.assertTrue(feature_it.nextFeature(feature))
self.assertEqual(feature.id(), first_feature.id())
self.assertTrue(feature.isValid())
# grab all features
self.assertTrue(feature_it.nextFeature(feature))
self.assertTrue(feature_it.nextFeature(feature))
# none left
self.assertFalse(feature_it.nextFeature(feature))
self.assertFalse(feature.isValid())
def testGetFeaturesFilterRectTests(self):
extent = QgsRectangle(-70, 67, -60, 80)
request = QgsFeatureRequest().setFilterRect(extent)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([2, 4]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in set([2, 4]))
# test with an empty rectangle
extent = QgsRectangle()
request = QgsFeatureRequest().setFilterRect(extent)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([1, 2, 3, 4, 5]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# ExactIntersection flag set, but no filter rect set. Should be ignored.
request = QgsFeatureRequest()
request.setFlags(QgsFeatureRequest.ExactIntersect)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([1, 2, 3, 4, 5]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
def testRectAndExpression(self):
extent = QgsRectangle(-70, 67, -60, 80)
request = QgsFeatureRequest().setFilterExpression('"cnt">200').setFilterRect(extent)
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = [4]
assert set(
expected) == result, 'Expected {} and got {} when testing for combination of filterRect and expression'.format(
set(expected), result)
self.assertTrue(all_valid)
# shouldn't matter what order this is done in
request = QgsFeatureRequest().setFilterRect(extent).setFilterExpression('"cnt">200')
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = [4]
assert set(
expected) == result, 'Expected {} and got {} when testing for combination of filterRect and expression'.format(
set(expected), result)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in expected)
def testGetFeaturesDistanceWithinTests(self):
request = QgsFeatureRequest().setDistanceWithin(QgsGeometry.fromWkt('LineString (-63.2 69.9, -68.47 69.86, -69.74 79.28)'), 1.7)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([2, 5]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in set([2, 5]))
request = QgsFeatureRequest().setDistanceWithin(QgsGeometry.fromWkt('LineString (-63.2 69.9, -68.47 69.86, -69.74 79.28)'), 0.6)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([2]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in set([2]))
# in different crs
request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:3857'), QgsProject.instance().transformContext()).setDistanceWithin(QgsGeometry.fromWkt('LineString (-7035391 11036245, -7622045 11023301, -7763421 15092839)'), 250000)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.assertEqual(set(features), {2, 5})
self.assertTrue(all_valid)
# point geometry
request = QgsFeatureRequest().setDistanceWithin(
QgsGeometry.fromWkt('Point (-68.1 78.1)'), 3.6)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([4, 5]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in set([4, 5]))
request = QgsFeatureRequest().setDistanceWithin(
QgsGeometry.fromWkt('Polygon ((-64.47 79.59, -64.37 73.59, -72.69 73.61, -72.73 68.07, -62.51 68.01, -62.71 79.55, -64.47 79.59))'), 0)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([2]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in set([2]))
request = QgsFeatureRequest().setDistanceWithin(
QgsGeometry.fromWkt('Polygon ((-64.47 79.59, -64.37 73.59, -72.69 73.61, -72.73 68.07, -62.51 68.01, -62.71 79.55, -64.47 79.59))'), 1.3)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([2, 4]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in set([2, 4]))
request = QgsFeatureRequest().setDistanceWithin(
QgsGeometry.fromWkt('Polygon ((-64.47 79.59, -64.37 73.59, -72.69 73.61, -72.73 68.07, -62.51 68.01, -62.71 79.55, -64.47 79.59))'), 2.3)
features = [f['pk'] for f in self.source.getFeatures(request)]
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
assert set(features) == set([1, 2, 4]), 'Got {} instead'.format(features)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in set([1, 2, 4]))
# test with a linestring whose bounding box overlaps all query
# points but which is within the given distance of only one of
# them -- a point which we hope is NOT the first one returned.
# This is a test for https://github.com/qgis/QGIS/issues/45352
request = QgsFeatureRequest().setDistanceWithin(
QgsGeometry.fromWkt('LINESTRING(-100 80, -100 66, -30 66, -30 80)'), 0.5)
features = {f['pk'] for f in self.source.getFeatures(request)}
self.assertEqual(features, {1}, "Unexpected return from QgsFeatureRequest with DistanceWithin filter")
def testGeomAndAllAttributes(self):
"""
Test combination of a filter which requires geometry and all attributes
"""
request = QgsFeatureRequest().setFilterExpression(
'attribute($currentfeature,\'cnt\')>200 and $x>=-70 and $x<=-60').setSubsetOfAttributes([]).setFlags(
QgsFeatureRequest.NoGeometry | QgsFeatureRequest.IgnoreStaticNodesDuringExpressionCompilation)
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.assertEqual(result, {4})
self.assertTrue(all_valid)
request = QgsFeatureRequest().setFilterExpression(
'attribute($currentfeature,\'cnt\')>200 and $x>=-70 and $x<=-60').setFlags(QgsFeatureRequest.IgnoreStaticNodesDuringExpressionCompilation)
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.assertEqual(result, {4})
self.assertTrue(all_valid)
def testRectAndFids(self):
"""
Test the combination of a filter rect along with filterfids
"""
# first get feature ids
ids = {f['pk']: f.id() for f in self.source.getFeatures()}
extent = QgsRectangle(-70, 67, -60, 80)
request = QgsFeatureRequest().setFilterFids([ids[3], ids[4]]).setFilterRect(extent)
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = [4]
assert set(
expected) == result, 'Expected {} and got {} when testing for combination of filterRect and expression'.format(
set(expected), result)
self.assertTrue(all_valid)
# shouldn't matter what order this is done in
request = QgsFeatureRequest().setFilterRect(extent).setFilterFids([ids[3], ids[4]])
result = set([f['pk'] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
expected = [4]
assert set(
expected) == result, 'Expected {} and got {} when testing for combination of filterRect and expression'.format(
set(expected), result)
self.assertTrue(all_valid)
# test that results match QgsFeatureRequest.acceptFeature
for f in self.source.getFeatures():
self.assertEqual(request.acceptFeature(f), f['pk'] in expected)
def testGetFeaturesDestinationCrs(self):
request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem('epsg:3785'),
QgsProject.instance().transformContext())
features = {f['pk']: f for f in self.source.getFeatures(request)}
# test that features have been reprojected
self.assertAlmostEqual(features[1].geometry().constGet().x(), -7829322, -5)
self.assertAlmostEqual(features[1].geometry().constGet().y(), 9967753, -5)
self.assertAlmostEqual(features[2].geometry().constGet().x(), -7591989, -5)
self.assertAlmostEqual(features[2].geometry().constGet().y(), 11334232, -5)
self.assertFalse(features[3].hasGeometry())
self.assertAlmostEqual(features[4].geometry().constGet().x(), -7271389, -5)
self.assertAlmostEqual(features[4].geometry().constGet().y(), 14531322, -5)
self.assertAlmostEqual(features[5].geometry().constGet().x(), -7917376, -5)
self.assertAlmostEqual(features[5].geometry().constGet().y(), 14493008, -5)
# when destination crs is set, filter rect should be in destination crs
rect = QgsRectangle(-7650000, 10500000, -7200000, 15000000)
request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem('epsg:3785'),
QgsProject.instance().transformContext()).setFilterRect(rect)
features = {f['pk']: f for f in self.source.getFeatures(request)}
self.assertEqual(set(features.keys()), {2, 4})
# test that features have been reprojected
self.assertAlmostEqual(features[2].geometry().constGet().x(), -7591989, -5)
self.assertAlmostEqual(features[2].geometry().constGet().y(), 11334232, -5)
self.assertAlmostEqual(features[4].geometry().constGet().x(), -7271389, -5)
self.assertAlmostEqual(features[4].geometry().constGet().y(), 14531322, -5)
# bad rect for transform
rect = QgsRectangle(-99999999999, 99999999999, -99999999998, 99999999998)
request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem('epsg:28356'),
QgsProject.instance().transformContext()).setFilterRect(rect)
features = [f for f in self.source.getFeatures(request)]
self.assertFalse(features)
def testGetFeaturesLimit(self):
it = self.source.getFeatures(QgsFeatureRequest().setLimit(2))
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features, got {} instead'.format(len(features))
# fetch one feature
feature = QgsFeature()
assert not it.nextFeature(feature), 'Expected no feature after limit, got one'
it.rewind()
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features after rewind, got {} instead'.format(len(features))
it.rewind()
assert it.nextFeature(feature), 'Expected feature after rewind, got none'
it.rewind()
features = [f['pk'] for f in it]
assert len(features) == 2, 'Expected two features after rewind, got {} instead'.format(len(features))
# test with expression, both with and without compilation
try:
self.disableCompiler()
except AttributeError:
pass
it = self.source.getFeatures(QgsFeatureRequest().setLimit(2).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(
features)
try:
self.enableCompiler()
except AttributeError:
pass
it = self.source.getFeatures(QgsFeatureRequest().setLimit(2).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(
features)
# limit to more features than exist
it = self.source.getFeatures(QgsFeatureRequest().setLimit(3).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert set(features) == set([1, 5]), 'Expected [1,5] for expression and feature limit, Got {} instead'.format(
features)
# limit to fewer features than match the filter
it = self.source.getFeatures(QgsFeatureRequest().setLimit(1).setFilterExpression('cnt <= 100'))
features = [f['pk'] for f in it]
assert 1 in features or 5 in features, 'Expected either 1 or 5 for expression and feature limit, Got {} instead'.format(
features)
def testClosedIterators(self):
""" Test behavior of closed iterators """
# Test retrieving feature after closing iterator
f_it = self.source.getFeatures(QgsFeatureRequest())
fet = QgsFeature()
assert f_it.nextFeature(fet), 'Could not fetch feature'
assert fet.isValid(), 'Feature is not valid'
assert f_it.close(), 'Could not close iterator'
self.assertFalse(f_it.nextFeature(fet),
'Fetched feature after iterator closed, expected nextFeature() to return False')
self.assertFalse(fet.isValid(), 'Valid feature fetched from closed iterator, should be invalid')
# Test rewinding closed iterator
self.assertFalse(f_it.rewind(), 'Rewinding closed iterator successful, should not be allowed')
def testGetFeaturesSubsetAttributes(self):
""" Test that expected results are returned when using subsets of attributes """
tests = {'pk': set([1, 2, 3, 4, 5]),
'cnt': set([-200, 300, 100, 200, 400]),
'name': set(['Pear', 'Orange', 'Apple', 'Honey', NULL]),
'name2': set(['NuLl', 'PEaR', 'oranGe', 'Apple', 'Honey']),
'dt': set([NULL, '2021-05-04 13:13:14' if self.treat_datetime_as_string() else QDateTime(2021, 5, 4, 13, 13, 14) if not self.treat_datetime_as_string() else '2021-05-04 13:13:14',
'2020-05-04 12:14:14' if self.treat_datetime_as_string() else QDateTime(2020, 5, 4, 12, 14, 14) if not self.treat_datetime_as_string() else '2020-05-04 12:14:14',
'2020-05-04 12:13:14' if self.treat_datetime_as_string() else QDateTime(2020, 5, 4, 12, 13, 14) if not self.treat_datetime_as_string() else '2020-05-04 12:13:14',
'2020-05-03 12:13:14' if self.treat_datetime_as_string() else QDateTime(2020, 5, 3, 12, 13, 14) if not self.treat_datetime_as_string() else '2020-05-03 12:13:14']),
'date': set([NULL,
'2020-05-02' if self.treat_date_as_string() else QDate(2020, 5, 2) if not self.treat_date_as_datetime() else QDateTime(2020, 5, 2, 0, 0, 0),
'2020-05-03' if self.treat_date_as_string() else QDate(2020, 5, 3) if not self.treat_date_as_datetime() else QDateTime(2020, 5, 3, 0, 0, 0),
'2020-05-04' if self.treat_date_as_string() else QDate(2020, 5, 4) if not self.treat_date_as_datetime() else QDateTime(2020, 5, 4, 0, 0, 0),
'2021-05-04' if self.treat_date_as_string() else QDate(2021, 5, 4) if not self.treat_date_as_datetime() else QDateTime(2021, 5, 4, 0, 0, 0)]),
'time': set([QTime(12, 13, 1) if not self.treat_time_as_string() else '12:13:01',
QTime(12, 14, 14) if not self.treat_time_as_string() else '12:14:14',
QTime(12, 13, 14) if not self.treat_time_as_string() else '12:13:14',
QTime(13, 13, 14) if not self.treat_time_as_string() else '13:13:14', NULL])}
for field, expected in list(tests.items()):
request = QgsFeatureRequest().setSubsetOfAttributes([field], self.source.fields())
result = set([f[field] for f in self.source.getFeatures(request)])
all_valid = (all(f.isValid() for f in self.source.getFeatures(request)))
self.assertEqual(result, expected, 'Expected {}, got {}'.format(expected, result))
self.assertTrue(all_valid)
def testGetFeaturesSubsetAttributes2(self):
""" Test that other fields are NULL when fetching subsets of attributes """
for field_to_fetch in ['pk', 'cnt', 'name', 'name2', 'dt', 'date', 'time']:
for f in self.source.getFeatures(
QgsFeatureRequest().setSubsetOfAttributes([field_to_fetch], self.source.fields())):
# Check that all other fields are NULL and force name to lower-case
for other_field in [field.name() for field in self.source.fields() if
field.name().lower() != field_to_fetch]:
if other_field == 'pk' or other_field == 'PK':
# skip checking the primary key field, as it may be validly fetched by providers to use as feature id
continue
self.assertEqual(f[other_field], NULL,
'Value for field "{}" was present when it should not have been fetched by request'.format(
other_field))
def testGetFeaturesNoGeometry(self):
""" Test that no geometry is present when fetching features without geometry"""
for f in self.source.getFeatures(QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry)):
self.assertFalse(f.hasGeometry(), 'Expected no geometry, got one')
self.assertTrue(f.isValid())
def testGetFeaturesWithGeometry(self):
""" Test that geometry is present when fetching features without setting NoGeometry flag"""
for f in self.source.getFeatures(QgsFeatureRequest()):
if f['pk'] == 3:
# no geometry for this feature
continue
assert f.hasGeometry(), 'Expected geometry, got none'
self.assertTrue(f.isValid())
def testUniqueValues(self):
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('cnt'))),
set([-200, 100, 200, 300, 400]))
assert set(['Apple', 'Honey', 'Orange', 'Pear', NULL]) == set(
self.source.uniqueValues(self.source.fields().lookupField('name'))), 'Got {}'.format(
set(self.source.uniqueValues(self.source.fields().lookupField('name'))))
if self.treat_datetime_as_string():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('dt'))),
set(['2021-05-04 13:13:14', '2020-05-04 12:14:14', '2020-05-04 12:13:14', '2020-05-03 12:13:14', NULL]))
else:
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('dt'))),
set([QDateTime(2021, 5, 4, 13, 13, 14), QDateTime(2020, 5, 4, 12, 14, 14), QDateTime(2020, 5, 4, 12, 13, 14), QDateTime(2020, 5, 3, 12, 13, 14), NULL]))
if self.treat_date_as_string():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('date'))),
set(['2020-05-03', '2020-05-04', '2021-05-04', '2020-05-02', NULL]))
elif self.treat_date_as_datetime():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('date'))),
set([QDateTime(2020, 5, 3, 0, 0, 0), QDateTime(2020, 5, 4, 0, 0, 0), QDateTime(2021, 5, 4, 0, 0, 0), QDateTime(2020, 5, 2, 0, 0, 0), NULL]))
else:
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('date'))),
set([QDate(2020, 5, 3), QDate(2020, 5, 4), QDate(2021, 5, 4), QDate(2020, 5, 2), NULL]))
if self.treat_time_as_string():
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('time'))),
set(['12:14:14', '13:13:14', '12:13:14', '12:13:01', NULL]))
else:
self.assertEqual(set(self.source.uniqueValues(self.source.fields().lookupField('time'))),
set([QTime(12, 14, 14), QTime(13, 13, 14), QTime(12, 13, 14), QTime(12, 13, 1), NULL]))
def testMinimumValue(self):
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('cnt')), -200)
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('name')), 'Apple')
if self.treat_datetime_as_string():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('dt')), '2020-05-03 12:13:14')
else:
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('dt')), QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)))
if self.treat_date_as_string():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('date')), '2020-05-02')
elif not self.treat_date_as_datetime():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('date')), QDate(2020, 5, 2))
else:
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('date')), QDateTime(2020, 5, 2, 0, 0, 0))
if not self.treat_time_as_string():
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('time')), QTime(12, 13, 1))
else:
self.assertEqual(self.source.minimumValue(self.source.fields().lookupField('time')), '12:13:01')
def testMaximumValue(self):
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('cnt')), 400)
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('name')), 'Pear')
if not self.treat_datetime_as_string():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('dt')), QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)))
else:
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('dt')), '2021-05-04 13:13:14')
if self.treat_date_as_string():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('date')), '2021-05-04')
elif not self.treat_date_as_datetime():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('date')), QDate(2021, 5, 4))
else:
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('date')), QDateTime(2021, 5, 4, 0, 0, 0))
if not self.treat_time_as_string():
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('time')), QTime(13, 13, 14))
else:
self.assertEqual(self.source.maximumValue(self.source.fields().lookupField('time')), '13:13:14')
def testAllFeatureIds(self):
ids = set([f.id() for f in self.source.getFeatures()])
self.assertEqual(set(self.source.allFeatureIds()), ids)
def testSubsetOfAttributesWithFilterExprWithNonExistingColumn(self):
""" Test fix for https://github.com/qgis/QGIS/issues/33878 """
request = QgsFeatureRequest().setSubsetOfAttributes([0])
request.setFilterExpression("non_existing = 1")
features = [f for f in self.source.getFeatures(request)]
self.assertEqual(len(features), 0)
| gpl-2.0 |
shangwuhencc/scikit-learn | examples/cluster/plot_cluster_iris.py | 347 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display, first, what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
by setting n_init to only 1 (the default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
lakshayg/tensorflow | tensorflow/contrib/boosted_trees/examples/mnist.py | 61 | 5840 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Demonstrates multiclass MNIST TF Boosted trees example.
This example demonstrates how to run experiments with TF Boosted Trees on
the MNIST dataset. We use layer-by-layer boosting with the diagonal Hessian
strategy for multiclass handling, and cross-entropy loss.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/mnist.py \
--output_dir="/tmp/mnist" --depth=4 --learning_rate=0.3 --batch_size=60000 \
--examples_per_layer=60000 --eval_batch_size=10000 --num_eval_steps=1 \
--num_trees=10 --l2=1 --vmodule=training_ops=1
When training is done, accuracy on eval data is reported. Point tensorboard
to the directory for the run to see how the training progresses:
tensorboard --logdir=/tmp/mnist
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeClassifier
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.learn import learn_runner
def get_input_fn(dataset_split,
batch_size,
capacity=10000,
min_after_dequeue=3000):
"""Input function over MNIST data."""
def _input_fn():
"""Prepare features and labels."""
images_batch, labels_batch = tf.train.shuffle_batch(
tensors=[dataset_split.images,
dataset_split.labels.astype(np.int32)],
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=True,
num_threads=4)
features_map = {"images": images_batch}
return features_map, labels_batch
return _input_fn
# Main config - creates a TF Boosted Trees Estimator based on flags.
def _get_tfbt(output_dir):
"""Configures TF Boosted Trees estimator based on flags."""
learner_config = learner_pb2.LearnerConfig()
num_classes = 10
learner_config.learning_rate_tuner.fixed.learning_rate = FLAGS.learning_rate
learner_config.num_classes = num_classes
learner_config.regularization.l1 = 0.0
learner_config.regularization.l2 = FLAGS.l2 / FLAGS.examples_per_layer
learner_config.constraints.max_tree_depth = FLAGS.depth
growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
learner_config.growing_mode = growing_mode
run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300)
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
# Create a TF Boosted trees estimator that can take in custom loss.
estimator = GradientBoostedDecisionTreeClassifier(
learner_config=learner_config,
n_classes=num_classes,
examples_per_layer=FLAGS.examples_per_layer,
model_dir=output_dir,
num_trees=FLAGS.num_trees,
center_bias=False,
config=run_config)
return estimator
def _make_experiment_fn(output_dir):
"""Creates experiment for gradient boosted decision trees."""
data = tf.contrib.learn.datasets.mnist.load_mnist()
train_input_fn = get_input_fn(data.train, FLAGS.batch_size)
eval_input_fn = get_input_fn(data.validation, FLAGS.eval_batch_size)
return tf.contrib.learn.Experiment(
estimator=_get_tfbt(output_dir),
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=None,
eval_steps=FLAGS.num_eval_steps,
eval_metrics=None)
def main(unused_argv):
learn_runner.run(
experiment_fn=_make_experiment_fn,
output_dir=FLAGS.output_dir,
schedule="train_and_evaluate")
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
# Define the list of flags that users can change.
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Choose the dir for the output.")
parser.add_argument(
"--batch_size",
type=int,
default=1000,
help="The batch size for reading data.")
parser.add_argument(
"--eval_batch_size",
type=int,
default=1000,
help="Size of the batch for eval.")
parser.add_argument(
"--num_eval_steps",
type=int,
default=1,
help="The number of steps to run evaluation for.")
# Flags for gradient boosted trees config.
parser.add_argument(
"--depth", type=int, default=4, help="Maximum depth of weak learners.")
parser.add_argument(
"--l2", type=float, default=1.0, help="l2 regularization per batch.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.1,
help="Learning rate (shrinkage weight) with which each new tree is added."
)
parser.add_argument(
"--examples_per_layer",
type=int,
default=1000,
help="Number of examples to accumulate stats for per layer.")
parser.add_argument(
"--num_trees",
type=int,
default=None,
required=True,
help="Number of trees to grow before stopping.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
aalmah/pylearn2 | pylearn2/utils/utlc.py | 49 | 7347 | """Several utilities for experimenting upon utlc datasets"""
# Standard library imports
import logging
import os
import inspect
import zipfile
from tempfile import TemporaryFile
# Third-party imports
import numpy
import theano
from pylearn2.datasets.utlc import load_ndarray_dataset, load_sparse_dataset
from pylearn2.utils import subdict, sharedX
logger = logging.getLogger(__name__)
##################################################
# Shortcuts and auxiliary functions
##################################################
def getboth(dict1, dict2, key, default=None):
"""
Try to retrieve `key` from `dict1` if it exists, otherwise look in `dict2`.
If the key is missing from both, return `default` when provided, otherwise
raise a KeyError.
Parameters
----------
dict1 : dict
WRITEME
dict2 : dict
WRITEME
key : WRITEME
default : WRITEME
Returns
-------
WRITEME
"""
try:
return dict1[key]
except KeyError:
if default is None:
return dict2[key]
else:
return dict2.get(key, default)
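# Illustrative sketch (not part of the original module): how getboth resolves
# keys between a configuration dict and a dict of defaults.  The dict contents
# below are made up for this example.
def _getboth_example():
    conf = {'dataset': 'terry'}
    defaults = {'dataset': 'ule', 'sparse': False}
    assert getboth(conf, defaults, 'dataset') == 'terry'       # found in dict1
    assert getboth(conf, defaults, 'sparse') is False          # falls back to dict2
    assert getboth(conf, defaults, 'normalize', default=True)  # default used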
##################################################
# Datasets loading and contest facilities
##################################################
def load_data(conf):
"""
Loads a specified dataset according to the parameters in the dictionary
Parameters
----------
conf : WRITEME
Returns
-------
WRITEME
"""
logger.info('... loading dataset')
# Special case for sparse format
if conf.get('sparse', False):
expected = inspect.getargspec(load_sparse_dataset)[0][1:]
data = load_sparse_dataset(conf['dataset'], **subdict(conf, expected))
valid, test = data[1:3]
# Sparse TERRY data on LISA servers contains an extra null first row in
# valid and test subsets.
if conf['dataset'] == 'terry':
valid = valid[1:]
test = test[1:]
assert valid.shape[0] == test.shape[0] == 4096, \
'Sparse TERRY data loaded has wrong number of examples'
if len(data) == 3:
return [data[0], valid, test]
else:
return [data[0], valid, test, data[3]]
# Load as the usual ndarray
expected = inspect.getargspec(load_ndarray_dataset)[0][1:]
data = load_ndarray_dataset(conf['dataset'], **subdict(conf, expected))
# Special case for on-the-fly normalization
if conf.get('normalize_on_the_fly', False):
return data
# Allocate shared variables
def shared_dataset(data_x):
"""Function that loads the dataset into shared variables"""
if conf.get('normalize', True):
return sharedX(data_x, borrow=True)
else:
return theano.shared(theano._asarray(data_x), borrow=True)
return map(shared_dataset, data)
def save_submission(conf, valid_repr, test_repr):
"""
Create a submission file given a configuration dictionary and a
representation for valid and test.
Parameters
----------
conf : WRITEME
valid_repr : WRITEME
test_repr : WRITEME
"""
logger.info('... creating zipfile')
# Ensure the given directory is correct
submit_dir = conf['savedir']
if not os.path.exists(submit_dir):
os.makedirs(submit_dir)
elif not os.path.isdir(submit_dir):
raise IOError('savedir %s is not a directory' % submit_dir)
basename = os.path.join(submit_dir, conf['dataset'] + '_' + conf['expname'])
# If there are more features than examples, output kernel matrices instead
if (valid_repr.shape[1] > valid_repr.shape[0]):
valid_repr = numpy.dot(valid_repr, valid_repr.T)
test_repr = numpy.dot(test_repr, test_repr.T)
# Quantize the data to integers in [0, 999]
valid_repr = numpy.floor((valid_repr / valid_repr.max())*999)
test_repr = numpy.floor((test_repr / test_repr.max())*999)
# Store the representations in two temporary files
valid_file = TemporaryFile()
test_file = TemporaryFile()
numpy.savetxt(valid_file, valid_repr, fmt="%.3f")
numpy.savetxt(test_file, test_repr, fmt="%.3f")
# Reread those files and put them together in a .zip
valid_file.seek(0)
test_file.seek(0)
submission = zipfile.ZipFile(basename + ".zip", "w",
compression=zipfile.ZIP_DEFLATED)
submission.writestr(basename + '_valid.prepro', valid_file.read())
submission.writestr(basename + '_final.prepro', test_file.read())
submission.close()
valid_file.close()
test_file.close()
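# Illustrative sketch (not part of the original module): the quantization step
# above maps a non-negative representation onto integers in [0, 999].  The toy
# array here is made up for the example.
def _quantize_example():
    r = numpy.array([[0.0, 0.5], [1.0, 2.0]])
    q = numpy.floor((r / r.max()) * 999)
    assert q.min() == 0 and q.max() == 999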
def create_submission(conf, transform_valid, transform_test=None, features=None):
"""
Create a submission file given a configuration dictionary and a
computation function.
Note that it always reloads the datasets to ensure valid & test
are not permuted.
Parameters
----------
conf : WRITEME
transform_valid : WRITEME
transform_test : WRITEME
features : WRITEME
"""
if transform_test is None:
transform_test = transform_valid
# Load the dataset, without permuting valid and test
kwargs = subdict(conf, ['dataset', 'normalize', 'normalize_on_the_fly', 'sparse'])
kwargs.update(randomize_valid=False, randomize_test=False)
valid_set, test_set = load_data(kwargs)[1:3]
# Sparse datasets are not stored as Theano shared vars.
if not conf.get('sparse', False):
valid_set = valid_set.get_value(borrow=True)
test_set = test_set.get_value(borrow=True)
# Prefilter features, if needed.
if features is not None:
valid_set = valid_set[:, features]
test_set = test_set[:, features]
# Valid and test representations
valid_repr = transform_valid(valid_set)
test_repr = transform_test(test_set)
# Write the representations to the submission zip file
save_submission(conf, valid_repr, test_repr)
##################################################
# Proxies for representation evaluations
##################################################
def compute_alc(valid_repr, test_repr):
"""
Returns the ALC of the valid set vs. the test set.
Note: this proxy won't work in the case of transductive learning
(this is an assumption), but it seems to be a good proxy in the
normal case (i.e. only train on the training set).
Parameters
----------
valid_repr : WRITEME
test_repr : WRITEME
Returns
-------
WRITEME
"""
# Concatenate the sets, and give different one hot labels for valid and test
n_valid = valid_repr.shape[0]
n_test = test_repr.shape[0]
_labvalid = numpy.hstack((numpy.ones((n_valid, 1)),
numpy.zeros((n_valid, 1))))
_labtest = numpy.hstack((numpy.zeros((n_test, 1)),
numpy.ones((n_test, 1))))
dataset = numpy.vstack((valid_repr, test_repr))
label = numpy.vstack((_labvalid, _labtest))
logger.info('... computing the ALC')
raise NotImplementedError("This got broken by embed no longer being "
"where it used to be (if it even still exists, I haven't "
"looked for it)")
# return embed.score(dataset, label)
def lookup_alc(data, transform):
"""
.. todo::
WRITEME
"""
valid_repr = transform(data[1].get_value(borrow=True))
test_repr = transform(data[2].get_value(borrow=True))
return compute_alc(valid_repr, test_repr)
| bsd-3-clause |
ephes/scikit-learn | examples/linear_model/plot_ols_3d.py | 347 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
# Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
roxyboy/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
(because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
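# Illustrative sketch (not part of scikit-learn): how an incremental estimator's
# ``partial_fit`` would typically use the helper above.  ``_initialize_state``
# is a hypothetical method name used only in this example.
def _partial_fit_usage_sketch(clf, X, y, classes=None):
    if _check_partial_fit_first_call(clf, classes):
        # First call: ``clf.classes_`` has just been set from ``classes``.
        clf._initialize_state(n_classes=len(clf.classes_))
    # ...incremental update of ``clf`` from X and y would go here...
    return clf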
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
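# Illustrative sketch (not part of scikit-learn): class_distribution on a small
# dense multioutput target; each column's priors sum to one.
def _class_distribution_example():
    y = np.array([[1, 0],
                  [2, 0],
                  [2, 1]])
    classes, n_classes, class_prior = class_distribution(y)
    assert [c.tolist() for c in classes] == [[1, 2], [0, 1]]
    assert n_classes == [2, 2]
    assert np.allclose(class_prior[0], [1. / 3, 2. / 3])
    assert np.allclose(class_prior[1], [2. / 3, 1. / 3])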
| bsd-3-clause |
roxyboy/scikit-learn | sklearn/metrics/pairwise.py | 103 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
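# Illustrative sketch (added for exposition, not part of scikit-learn): on a toy
# input the expansion ||x - y||^2 = x.x - 2 x.y + y.y used above agrees with a
# direct computation.  The helper name is only used for this sketch.
def _euclidean_distances_example():
    X = np.array([[0., 1.], [1., 1.]])
    Y = np.array([[2., 0.], [3., 4.]])
    direct = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1))
    assert np.allclose(euclidean_distances(X, Y), direct)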
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
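# Illustrative sketch (not part of scikit-learn): on a small dense input the
# chunked computation above matches the naive argmin/min over the full
# Euclidean distance matrix.
def _argmin_min_example():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(5, 3), rng.rand(7, 3)
    indices, values = pairwise_distances_argmin_min(X, Y)
    D = euclidean_distances(X, Y)
    assert np.array_equal(indices, D.argmin(axis=1))
    assert np.allclose(values, D.min(axis=1))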
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1
    degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
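# Illustrative sketch (added for exposition, not part of the original module;
# the helper name is ours): leaving gamma as None is the same as passing
# gamma=1.0 / n_features explicitly.
def _demo_polynomial_kernel():
    X = np.array([[1., 2.], [3., 4.]])
    K_default = polynomial_kernel(X, degree=2)
    K_explicit = polynomial_kernel(X, degree=2, gamma=1.0 / X.shape[1])
    return np.allclose(K_default, K_explicit)   # True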
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
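# Illustrative sketch (added for exposition, not part of the original module;
# the helper name is ours): the RBF kernel is the elementwise exponential of
# -gamma times the squared euclidean distances, so its diagonal is always 1.
def _demo_rbf_kernel():
    X = np.array([[0., 0.], [1., 0.], [0., 2.]])
    K = rbf_kernel(X, gamma=0.5)
    D2 = euclidean_distances(X, squared=True)
    return np.allclose(K, np.exp(-0.5 * D2))   # True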
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
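# Illustrative sketch (added for exposition, not part of the original module;
# the helper name is ours): on rows that are already L2-normalized,
# cosine_similarity coincides with linear_kernel, as the docstring states.
def _demo_cosine_similarity():
    X = normalize(np.array([[3., 4.], [1., 0.]]))
    return np.allclose(cosine_similarity(X), linear_kernel(X))   # True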
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
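# Illustrative sketch (added for exposition, not part of the original module;
# the helper name is ours): both chi2 kernels expect non-negative rows such
# as histograms; identical rows give 0 for the additive kernel and 1 for the
# exponentiated one.
def _demo_chi2_kernels():
    X = np.array([[0.3, 0.7], [0.6, 0.4]])
    return additive_chi2_kernel(X), chi2_kernel(X, gamma=1.0)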
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
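# Illustrative sketch (added for exposition, not part of the original module;
# the helper name is ours): a string metric, an equivalent callable, and the
# "precomputed" passthrough all agree.
def _demo_pairwise_distances():
    X = np.array([[0., 0.], [3., 4.]])
    D_str = pairwise_distances(X, metric="euclidean")
    D_call = pairwise_distances(
        X, metric=lambda a, b: np.sqrt(((a - b) ** 2).sum()))
    D_pre = pairwise_distances(D_str, metric="precomputed")
    return np.allclose(D_str, D_call) and D_pre is D_str   # True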
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the functions they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
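# Illustrative sketch (added for exposition, not part of the original module;
# the helper name is ours): filter_params=True silently drops keyword
# arguments that the chosen kernel does not accept, which is handy when the
# metric name is picked at run time.
def _demo_pairwise_kernels():
    X = np.array([[1., 0.], [0., 1.]])
    K_lin = pairwise_kernels(X, metric="linear", filter_params=True, gamma=10.0)
    K_rbf = pairwise_kernels(X, metric="rbf", gamma=0.5)
    return K_lin, K_rbf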
| bsd-3-clause |
rv816/lightfm | examples/movielens/data.py | 11 | 3560 | import itertools
import os
import zipfile
import numpy as np
import requests
import scipy.sparse as sp
def _get_movielens_path():
"""
Get path to the movielens dataset file.
"""
return os.path.join(os.path.dirname(os.path.abspath(__file__)),
'movielens.zip')
def _download_movielens(dest_path):
"""
Download the dataset.
"""
url = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
req = requests.get(url, stream=True)
with open(dest_path, 'wb') as fd:
for chunk in req.iter_content():
fd.write(chunk)
def _get_raw_movielens_data():
"""
Return the raw lines of the train and test files.
"""
path = _get_movielens_path()
if not os.path.isfile(path):
_download_movielens(path)
with zipfile.ZipFile(path) as datafile:
return (datafile.read('ml-100k/ua.base').decode().split('\n'),
datafile.read('ml-100k/ua.test').decode().split('\n'))
def _parse(data):
"""
Parse movielens dataset lines.
"""
for line in data:
if not line:
continue
uid, iid, rating, timestamp = [int(x) for x in line.split('\t')]
yield uid, iid, rating, timestamp
def _build_interaction_matrix(rows, cols, data):
"""
Build the training matrix (no_users, no_items),
with ratings >= 4.0 being marked as positive and
the rest as negative.
"""
mat = sp.lil_matrix((rows, cols), dtype=np.int32)
for uid, iid, rating, timestamp in data:
if rating >= 4.0:
mat[uid, iid] = 1.0
else:
mat[uid, iid] = -1.0
return mat.tocoo()
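# Illustrative sketch (added for exposition, not part of the original module;
# the helper name is ours): three toy (uid, iid, rating, timestamp) tuples
# become a sparse interaction matrix with +1 for ratings >= 4.0 and -1
# otherwise.
def _demo_build_interaction_matrix():
    data = [(0, 0, 5.0, 0), (0, 1, 2.0, 0), (1, 1, 4.0, 0)]
    mat = _build_interaction_matrix(2, 2, data)
    return mat.toarray()   # [[ 1, -1], [ 0,  1]]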
def _get_movie_raw_metadata():
"""
Get raw lines of the genre file.
"""
path = _get_movielens_path()
if not os.path.isfile(path):
_download_movielens(path)
with zipfile.ZipFile(path) as datafile:
return datafile.read('ml-100k/u.item').decode(errors='ignore').split('\n')
def get_movielens_item_metadata(use_item_ids):
"""
Build a matrix of genre features (no_items, no_features).
    If use_item_ids is True, per-item features will also be used.
"""
features = {}
genre_set = set()
for line in _get_movie_raw_metadata():
if not line:
continue
splt = line.split('|')
item_id = int(splt[0])
genres = [idx for idx, val in
zip(range(len(splt[5:])), splt[5:])
if int(val) > 0]
if use_item_ids:
# Add item-specific features too
genres.append(item_id)
for genre_id in genres:
genre_set.add(genre_id)
features[item_id] = genres
mat = sp.lil_matrix((len(features) + 1,
len(genre_set)),
dtype=np.int32)
for item_id, genre_ids in features.items():
for genre_id in genre_ids:
mat[item_id, genre_id] = 1
return mat
def get_movielens_data():
"""
Return (train_interactions, test_interactions).
"""
train_data, test_data = _get_raw_movielens_data()
uids = set()
iids = set()
for uid, iid, rating, timestamp in itertools.chain(_parse(train_data),
_parse(test_data)):
uids.add(uid)
iids.add(iid)
rows = max(uids) + 1
cols = max(iids) + 1
return (_build_interaction_matrix(rows, cols, _parse(train_data)),
_build_interaction_matrix(rows, cols, _parse(test_data)))
| apache-2.0 |
dotsdl/msmbuilder | msmbuilder/tests/test_metzner_mcmc.py | 2 | 3416 | import numpy as np
from msmbuilder.cluster import NDGrid
from msmbuilder.example_datasets import load_doublewell
from msmbuilder.msm import BayesianMarkovStateModel
from msmbuilder.msm import MarkovStateModel
from msmbuilder.msm._metzner_mcmc_fast import metzner_mcmc_fast
from msmbuilder.msm._metzner_mcmc_slow import metzner_mcmc_slow
def test_1():
Z = np.array([[1, 10, 2], [2, 26, 3], [15, 20, 20]]).astype(np.double)
value1 = list(metzner_mcmc_fast(Z, 4, n_thin=1, random_state=0))
value2 = list(metzner_mcmc_slow(Z, 4, n_thin=1, random_state=0))
np.testing.assert_array_almost_equal(np.array(value1), np.array(value2))
value3 = list(metzner_mcmc_fast(Z, 4, n_thin=2, random_state=0))
value4 = list(metzner_mcmc_slow(Z, 4, n_thin=2, random_state=0))
np.testing.assert_array_almost_equal(np.array(value3), np.array(value4))
np.testing.assert_array_almost_equal(
np.array(value1)[1::2], np.array(value3))
def test_2():
Z = np.array([[5., 2.], [1., 10.]])
value1 = list(metzner_mcmc_fast(Z, 100, n_thin=1, random_state=0))
value2 = list(metzner_mcmc_slow(Z, 100, n_thin=1, random_state=0))
np.testing.assert_array_almost_equal(np.array(value1), np.array(value2))
assert np.all(np.array(value1) > 0)
def test_3():
trajectory = [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
1, 1, 1, 1, 1, 2, 2, 2, 0, 0, 0, 2, 2, 2, 0, 0, 0]
msm1 = BayesianMarkovStateModel(
sampler='metzner', n_steps=1, n_samples=100, n_chains=1, random_state=0)
msm1.fit([trajectory])
msm2 = BayesianMarkovStateModel(
sampler='metzner_py', n_steps=1, n_samples=100, n_chains=1, random_state=0)
msm2.fit([trajectory])
np.testing.assert_array_almost_equal(
msm1.all_transmats_,
msm2.all_transmats_)
assert msm1.all_timescales_.shape == (100, 2)
assert msm1.all_eigenvalues_.shape == (100, 3)
assert msm1.all_left_eigenvectors_.shape == (100, 3, 3)
assert msm1.all_right_eigenvectors_.shape == (100, 3, 3)
assert msm1.all_populations_.shape == (100, 3)
np.testing.assert_array_almost_equal(
msm1.all_populations_.sum(axis=1),
np.ones(100))
def test_4():
trajectory = [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
1, 1, 1, 1, 1, 2, 2, 2, 0, 0, 0, 2, 2, 2, 0, 0, 0]
msm1 = BayesianMarkovStateModel(
n_steps=3, n_samples=10, n_chains=1, random_state=0).fit([trajectory])
assert msm1.all_transmats_.shape[0] == 10
msm2 = BayesianMarkovStateModel(
n_steps=4, n_samples=10, n_chains=3, random_state=0).fit([trajectory])
assert msm2.all_transmats_.shape[0] == 10
def test_5():
trjs = load_doublewell(random_state=0)['trajectories']
clusterer = NDGrid(n_bins_per_feature=5)
mle_msm = MarkovStateModel(lag_time=100, verbose=False)
b_msm = BayesianMarkovStateModel(
lag_time=100, n_samples=1000, n_chains=8, n_steps=1000,
random_state=0)
states = clusterer.fit_transform(trjs)
b_msm.fit(states)
mle_msm.fit(states)
# this is a pretty silly test. it checks that the mean transition
# matrix is not so dissimilar from the MLE transition matrix.
# This shouldn't necessarily be the case anyways -- the likelihood is
# not "symmetric". And the cutoff chosen is just heuristic.
assert np.linalg.norm(b_msm.all_transmats_.mean(axis=0) - mle_msm.transmat_) < 1e-2
| lgpl-2.1 |
unreal666/namebench | nb_third_party/dns/rdata.py | 215 | 14860 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunksize: At most this many octets that will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunksize: int"""
import cStringIO
import dns.exception
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
_hex_chunksize = 32
def _hexify(data, chunksize=None):
"""Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
"""
if chunksize is None:
chunksize = _hex_chunksize
hex = data.encode('hex_codec')
l = len(hex)
if l > chunksize:
chunks = []
i = 0
while i < l:
chunks.append(hex[i : i + chunksize])
i += chunksize
hex = ' '.join(chunks)
return hex
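# Illustrative sketch (added for exposition, not part of the original module;
# the helper name is ours; Python 2 only, like the rest of this file): with
# the default chunk size of 32 hex characters, 20 octets are rendered as two
# space-separated chunks.
def _demo_hexify():
    data = '\x00\x01' * 10   # 20 octets -> 40 hex characters
    return _hexify(data)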
_base64_chunksize = 32
def _base64ify(data, chunksize=None):
"""Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
"""
if chunksize is None:
chunksize = _base64_chunksize
b64 = data.encode('base64_codec')
b64 = b64.replace('\n', '')
l = len(b64)
if l > chunksize:
chunks = []
i = 0
while i < l:
chunks.append(b64[i : i + chunksize])
i += chunksize
b64 = ' '.join(chunks)
return b64
__escaped = {
'"' : True,
'\\' : True,
}
def _escapify(qstring):
"""Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
"""
text = ''
for c in qstring:
if c in __escaped:
text += '\\' + c
elif ord(c) >= 0x20 and ord(c) < 0x7F:
text += c
else:
text += '\\%03d' % ord(c)
return text
def _truncate_bitmap(what):
"""Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
@param what: a string of octets representing a bitmap.
@type what: string
@rtype: string
"""
for i in xrange(len(what) - 1, -1, -1):
if what[i] != '\x00':
break
return ''.join(what[0 : i + 1])
class Rdata(object):
"""Base class for all DNS rdata types.
"""
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
"""Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
"""
self.rdclass = rdclass
self.rdtype = rdtype
def covers(self):
"""DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
@rtype: int
"""
return dns.rdatatype.NONE
def extended_rdatatype(self):
"""Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
"""
return self.covers() << 16 | self.rdtype
def to_text(self, origin=None, relativize=True, **kw):
"""Convert an rdata to text format.
@rtype: string
"""
raise NotImplementedError
def to_wire(self, file, compress = None, origin = None):
"""Convert an rdata to wire format.
@rtype: string
"""
raise NotImplementedError
def to_digestable(self, origin = None):
"""Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form."""
f = cStringIO.StringIO()
self.to_wire(f, None, origin)
return f.getvalue()
def validate(self):
"""Check that the current contents of the rdata's fields are
valid. If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
"""
dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
def __repr__(self):
covers = self.covers()
if covers == dns.rdatatype.NONE:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(covers) + ')'
return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
"""Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
"""
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or \
self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
"""Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
from_text = classmethod(from_text)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
"""Build an rdata object from wire format
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
        @param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
"""Convert any domain names in the rdata to the specified
relativization.
"""
pass
class GenericRdata(Rdata):
"""Generate Rdata Class
This class is used for rdata types for which we have no better
implementation. It implements the DNS "unknown RRs" scheme.
"""
__slots__ = ['data']
def __init__(self, rdclass, rdtype, data):
super(GenericRdata, self).__init__(rdclass, rdtype)
self.data = data
def to_text(self, origin=None, relativize=True, **kw):
return r'\# %d ' % len(self.data) + _hexify(self.data)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
token = tok.get()
if not token.is_identifier() or token.value != '\#':
raise dns.exception.SyntaxError(r'generic rdata does not start with \#')
length = tok.get_int()
chunks = []
while 1:
token = tok.get()
if token.is_eol_or_eof():
break
chunks.append(token.value)
hex = ''.join(chunks)
data = hex.decode('hex_codec')
if len(data) != length:
raise dns.exception.SyntaxError('generic rdata hex data has wrong length')
return cls(rdclass, rdtype, data)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
file.write(self.data)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
return cls(rdclass, rdtype, wire[current : current + rdlen])
from_wire = classmethod(from_wire)
def _cmp(self, other):
return cmp(self.data, other.data)
_rdata_modules = {}
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
def import_module(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
mod = _rdata_modules.get((rdclass, rdtype))
rdclass_text = dns.rdataclass.to_text(rdclass)
rdtype_text = dns.rdatatype.to_text(rdtype)
rdtype_text = rdtype_text.replace('-', '_')
if not mod:
mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
if not mod:
try:
mod = import_module('.'.join([_module_prefix,
rdclass_text, rdtype_text]))
_rdata_modules[(rdclass, rdtype)] = mod
except ImportError:
try:
mod = import_module('.'.join([_module_prefix,
'ANY', rdtype_text]))
_rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
except ImportError:
mod = None
if mod:
cls = getattr(mod, rdtype_text)
else:
cls = GenericRdata
return cls
def from_text(rdclass, rdtype, tok, origin = None, relativize = True):
"""Build an rdata object from text format.
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_text() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: Should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance"""
if isinstance(tok, str):
tok = dns.tokenizer.Tokenizer(tok)
cls = get_rdata_class(rdclass, rdtype)
if cls != GenericRdata:
# peek at first token
token = tok.get()
tok.unget(token)
if token.is_identifier() and \
token.value == r'\#':
#
# Known type using the generic syntax. Extract the
# wire form from the generic syntax, and then run
# from_wire on it.
#
rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
relativize)
return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
origin)
return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin = None):
"""Build an rdata object from wire format
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_wire() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
    @param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance"""
cls = get_rdata_class(rdclass, rdtype)
return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin)
| apache-2.0 |
ephes/scikit-learn | examples/applications/plot_prediction_latency.py | 233 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
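# Illustrative sketch (added for exposition, not part of the original example;
# the helper name is ours and it is not called by the benchmark below): time a
# single fitted estimator on a tiny synthetic problem. Atomic mode predicts
# 1-d rows, which the scikit-learn version this example targets still accepts.
def _demo_benchmark_estimator():
    X, y = make_regression(n_samples=200, n_features=10, noise=0.1)
    model = Ridge().fit(X[:150], y[:150])
    atomic, bulk = benchmark_estimator(model, X[150:], n_bulk_repeats=5)
    return atomic.mean(), bulk.mean()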
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
cls_names : list of estimator class names that generated the runtimes
pred_type : 'bulk' or 'atomic'
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
yinwenpeng/rescale | en/parser/nltk_lite/probability.py | 10 | 59364 | # Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (additions)
# Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: probability.py 3498 2006-10-14 05:30:32Z stevenbird $
_NINF = float('-1e300')
"""
Classes for representing and processing probabilistic information.
The L{FreqDist} class is used to encode X{frequency distributions},
which count the number of times that each outcome of an experiment
occurs.
The L{ProbDistI} class defines a standard interface for X{probability
distributions}, which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- X{derived probability distributions} are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- X{analytic probability distributions} are created directly from
parameters (such as variance).
The L{ConditionalFreqDist} class and L{ConditionalProbDistI} interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the C{ConditionalProbDistI} interface is
L{ConditionalProbDist}, a derived distribution.
"""
import types, math
try: import numpy
except: pass
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
class FreqDist(object):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occured. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occured as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> fdist = FreqDist()
>>> for word in tokenize.whitespace(sent):
... fdist.inc(word)
"""
def __init__(self):
"""
Construct a new empty, C{FreqDist}. In particular, the count
for every sample is zero.
"""
self._count = {}
self._N = 0
self._Nr_cache = None
self._max_cache = None
def inc(self, sample, count=1):
"""
Increment this C{FreqDist}'s count for the given
sample.
@param sample: The sample whose count should be incremented.
@type sample: any
@param count: The amount to increment the sample's count by.
@type count: C{int}
@rtype: None
@raise NotImplementedError: If C{sample} is not a
supported sample type.
"""
if count == 0: return
self._N += count
self._count[sample] = self._count.get(sample,0) + count
# Invalidate the Nr cache and max cache.
self._Nr_cache = None
self._max_cache = None
def N(self):
"""
@return: The total number of sample outcomes that have been
recorded by this C{FreqDist}. For the number of unique
sample values (or bins) with counts greater than zero, use
C{FreqDist.B()}.
@rtype: C{int}
"""
return self._N
def B(self):
"""
@return: The total number of sample values (or X{bins}) that
have counts greater than zero. For the total
number of sample outcomes recorded, use C{FreqDist.N()}.
@rtype: C{int}
"""
return len(self._count)
def samples(self):
"""
@return: A list of all samples that have been recorded as
outcomes by this frequency distribution. Use C{count()}
to determine the count for each sample.
@rtype: C{list}
"""
return self._count.keys()
def Nr(self, r, bins=None):
"""
@return: The number of samples with count r.
@rtype: C{int}
@type r: C{int}
@param r: A sample count.
@type bins: C{int}
@param bins: The number of possible sample outcomes. C{bins}
is used to calculate Nr(0). In particular, Nr(0) is
C{bins-self.B()}. If C{bins} is not specified, it
defaults to C{self.B()} (so Nr(0) will be 0).
"""
if r < 0: raise IndexError, 'FreqDist.Nr(): r must be non-negative'
# Special case for Nr(0):
if r == 0:
if bins is None: return 0
else: return bins-self.B()
# We have to search the entire distribution to find Nr. Since
# this is an expensive operation, and is likely to be used
# repeatedly, cache the results.
if self._Nr_cache is None:
self._cache_Nr_values()
if r >= len(self._Nr_cache): return 0
return self._Nr_cache[r]
def _cache_Nr_values(self):
Nr = [0]
for sample in self.samples():
c = self._count.get(sample, 0)
if c >= len(Nr):
Nr += [0]*(c+1-len(Nr))
Nr[c] += 1
self._Nr_cache = Nr
def count(self, sample):
"""
Return the count of a given sample. The count of a sample is
defined as the number of times that sample outcome was
recorded by this C{FreqDist}. Counts are non-negative
integers.
@return: The count of a given sample.
@rtype: C{int}
@param sample: the sample whose count
should be returned.
@type sample: any.
"""
return self._count.get(sample, 0)
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this C{FreqDist}. The count of a sample is defined as the
number of times that sample outcome was recorded by this
C{FreqDist}. Frequencies are always real numbers in the range
[0, 1].
@return: The frequency of a given sample.
@rtype: float
@param sample: the sample whose frequency
should be returned.
@type sample: any
"""
        if self._N == 0: return 0
return float(self._count.get(sample, 0)) / self._N
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
        returned is undefined. If no outcomes have occurred in this
frequency distribution, return C{None}.
@return: The sample with the maximum number of outcomes in this
frequency distribution.
@rtype: any or C{None}
"""
if self._max_cache is None:
best_sample = None
best_count = -1
for sample in self._count.keys():
if self._count[sample] > best_count:
best_sample = sample
best_count = self._count[sample]
self._max_cache = best_sample
return self._max_cache
def sorted_samples(self):
"""
Return the samples sorted in decreasing order of frequency. Instances
with the same count will be arbitrarily ordered. Instances with a
count of zero will be omitted. This method is C{O(N^2)}, where C{N} is
the number of samples, but will complete in a shorter time on average.
@return: The set of samples in sorted order.
@rtype: sequence of any
"""
items = [(-count,sample) for (sample,count) in self._count.items()]
items.sort()
return [sample for (neg_count,sample) in items]
def __repr__(self):
"""
@return: A string representation of this C{FreqDist}.
@rtype: string
"""
return '<FreqDist with %d samples>' % self.N()
def __str__(self):
"""
@return: A string representation of this C{FreqDist}.
@rtype: string
"""
samples = self.sorted_samples()
items = ['%r: %r' % (s, self._count[s]) for s in samples]
return '<FreqDist: %s>' % ', '.join(items)
def __contains__(self, sample):
"""
@return: True if the given sample occurs one or more times in
this frequency distribution.
@rtype: C{boolean}
@param sample: The sample to search for.
@type sample: any
"""
return self._count.has_key(sample)
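# Illustrative sketch (added for exposition, not part of the original module;
# the helper name is ours): build a small frequency distribution by hand and
# query it, mirroring the tokenizer-based example in the class docstring.
def _demo_freqdist():
    fdist = FreqDist()
    for word in 'the cat sat on the mat'.split():
        fdist.inc(word)
    return fdist.count('the'), fdist.freq('the'), fdist.max()   # (2, ~0.33, 'the')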
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. C{ProbDist}s are often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
def __init__(self):
if self.__class__ == ProbDistI:
raise AssertionError, "Interfaces can't be instantiated"
def prob(self, sample):
"""
@return: the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
@rtype: float
@param sample: The sample whose probability
should be returned.
@type sample: any
"""
raise AssertionError()
def logprob(self, sample):
"""
@return: the natural logarithm of the probability for a given
        sample. Log probabilities range from negative infinity to
zero.
@rtype: float
@param sample: The sample whose probability
should be returned.
@type sample: any
"""
# Default definition, in terms of prob()
p = self.prob(sample)
if p == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
return _NINF
else:
return math.log(p)
def max(self):
"""
@return: the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
@rtype: any
"""
raise AssertionError()
def samples(self):
"""
@return: A list of all samples that have nonzero
probabilities. Use C{prob} to find the probability of
each sample.
@rtype: C{list}
"""
raise AssertionError()
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in C{samples}.
@param samples: The samples that should be given uniform
probability.
@type samples: C{list}
@raise ValueError: If C{samples} is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
if sample in self._sampleset: return self._prob
else: return 0
def max(self): return self._samples[0]
def samples(self): return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if C{log} is true). If C{normalize} is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
"""
self._prob_dict = prob_dict.copy()
self._log = log
# Normalize the distribution, if requested.
if normalize:
if log:
value_sum = sum_logs(self._prob_dict.values())
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict.keys()))
for x in prob_dict.keys():
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict.keys())
for x in prob_dict.keys():
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
if sample not in self._prob_dict: return 0
else: return math.exp(self._prob_dict[sample])
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
else: return math.log(self._prob_dict[sample])
def max(self):
if not hasattr(self, '_max'):
self._max = max([(p,v) for (v,p) in self._prob_dict.items()])[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
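# A minimal usage sketch of C{DictionaryProbDist}, added for illustration (the
# dictionary values are arbitrary):
#
#     dpdist = DictionaryProbDist({'a': 2, 'b': 1, 'c': 1}, normalize=True)
#     dpdist.prob('a')    # 0.5, once the values are rescaled to sum to one
#     dpdist.prob('d')    # 0 -- unknown samples get zero probability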
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
X{maximum likelihood estimate} approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
"""
def __init__(self, freqdist):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate C{freqdist}.
@type freqdist: C{FreqDist}
@param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
if freqdist.N() == 0:
raise ValueError('An MLE probability distribution must '+
'have at least one sample.')
self._freqdist = freqdist
def freqdist(self):
"""
@return: The frequency distribution that this probability
distribution is based on.
@rtype: C{FreqDist}
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.samples()
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
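# A minimal usage sketch of C{MLEProbDist}, added for illustration (it assumes
# a C{FreqDist} built from a short, made-up outcome sequence):
#
#     fdist = FreqDist()
#     for outcome in 'H T H H'.split(): fdist.inc(outcome)
#     mle = MLEProbDist(fdist)
#     mle.prob('H')    # 0.75, i.e. fdist.freq('H')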
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
    C{Lidstone estimate} is parameterized by a real number M{gamma},
which typically ranges from 0 to 1. The X{Lidstone estimate}
approximates the probability of a sample with count M{c} from an
experiment with M{N} outcomes and M{B} bins as
    M{(c+gamma)/(N+B*gamma)}. This is equivalent to adding
M{gamma} to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
"""
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate C{freqdist}.
@type freqdist: C{FreqDist}
@param freqdist: The frequency distribution that the
probability estimates should be based on.
@type gamma: C{float}
        @param gamma: A real number used to parameterize the
            estimate. The Lidstone estimate is equivalent to adding
M{gamma} to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
@type bins: C{int}
@param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
C{bins} is not specified, it defaults to C{freqdist.B()}.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s must be ' % name +
'greater than or equal to\nthe number of '+
'bins in the FreqDist used to create it.')
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None: bins = freqdist.B()
self._bins = bins
def freqdist(self):
"""
@return: The frequency distribution that this probability
distribution is based on.
@rtype: C{FreqDist}
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist.count(sample)
return (c + self._gamma) / (self._N + self._bins * self._gamma)
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.samples()
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
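# A worked sketch of the Lidstone estimate, added for illustration (the token
# sequence is made up): for 'the dog saw the cat' we have N=5 outcomes and
# B=4 bins, so with gamma=0.2:
#
#     fdist = FreqDist()
#     for word in 'the dog saw the cat'.split(): fdist.inc(word)
#     lid = LidstoneProbDist(fdist, 0.2)
#     lid.prob('the')     # (2+0.2)/(5+4*0.2) ~= 0.379
#     lid.prob('bird')    # (0+0.2)/(5+4*0.2) ~= 0.034 for an unseen word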
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
    X{Laplace estimate} approximates the probability of a sample with
    count M{c} from an experiment with M{N} outcomes and M{B} bins as
    M{(c+1)/(N+B)}. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate C{freqdist}.
@type freqdist: C{FreqDist}
@param freqdist: The frequency distribution that the
probability estimates should be based on.
@type bins: C{int}
@param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
C{bins} is not specified, it defaults to C{freqdist.B()}.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
X{expected likelihood estimate} approximates the probability of a
sample with count M{c} from an experiment with M{N} outcomes and
    M{B} bins as M{(c+0.5)/(N+B/2)}. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate C{freqdist}.
@type freqdist: C{FreqDist}
@param freqdist: The frequency distribution that the
probability estimates should be based on.
@type bins: C{int}
@param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
C{bins} is not specified, it defaults to C{freqdist.B()}.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
distribution" and the "base frequency distribution." The
    X{heldout estimate} uses the X{heldout frequency
distribution} to predict the probability of each sample, given its
frequency in the X{base frequency distribution}.
In particular, the heldout estimate approximates the probability
for a sample that occurs M{r} times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur M{r} times in the base distribution.
This average frequency is M{Tr[r]/(Nr[r]*N)}, where:
- M{Tr[r]} is the total count in the heldout distribution for
all samples that occur M{r} times in the base
distribution.
- M{Nr[r]} is the number of samples that occur M{r} times in
the base distribution.
- M{N} is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the C{prob} member
function, M{Tr[r]/(Nr[r]*N)} is precomputed for each value of M{r}
when the C{HeldoutProbDist} is created.
@type _estimate: C{list} of C{float}
@ivar _estimate: A list mapping from M{r}, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. C{_estimate[M{r}]} is
calculated by finding the average frequency in the heldout
distribution of all samples that occur M{r} times in the base
distribution. In particular, C{_estimate[M{r}]} =
M{Tr[r]/(Nr[r]*N)}.
@type _max_r: C{int}
@ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. C{_max_r} is used to decide how
large C{_estimate} must be.
"""
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate C{base_fdist} and
C{heldout_fdist}.
@type base_fdist: C{FreqDist}
@param base_fdist: The base frequency distribution.
@type heldout_fdist: C{FreqDist}
@param heldout_fdist: The heldout frequency distribution.
@type bins: C{int}
@param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
C{bins} is not specified, it defaults to C{freqdist.B()}.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist.count(base_fdist.max())
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
@return: the list M{Tr}, where M{Tr[r]} is the total count in
C{heldout_fdist} for all samples that occur M{r}
times in C{base_fdist}.
@rtype: C{list} of C{float}
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist.samples():
r = self._base_fdist.count(sample)
Tr[r] += self._heldout_fdist.count(sample)
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
@return: the list M{estimate}, where M{estimate[r]} is the
probability estimate for any sample that occurs M{r} times
in the base frequency distribution. In particular,
M{estimate[r]} is M{Tr[r]/(N[r]*N)}. In the special case
that M{N[r]=0}, M{estimate[r]} will never be used; so we
define M{estimate[r]=None} for those cases.
@rtype: C{list} of C{float}
@type Tr: C{list} of C{float}
@param Tr: the list M{Tr}, where M{Tr[r]} is the total count in
the heldout distribution for all samples that occur M{r}
times in base distribution.
@type Nr: C{list} of C{float}
@param Nr: The list M{Nr}, where M{Nr[r]} is the number of
samples that occur M{r} times in the base distribution.
@type N: C{int}
@param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
@return: The base frequency distribution that this probability
distribution is based on.
@rtype: C{FreqDist}
"""
return self._base_fdist
def heldout_fdist(self):
"""
@return: The heldout frequency distribution that this
probability distribution is based on.
@rtype: C{FreqDist}
"""
return self._heldout_fdist
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist.count(sample)
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
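# A worked sketch of the heldout estimate, added for illustration (the counts
# and the names base_fdist/heldout_fdist are assumed): if 'a' and 'b' each
# occur r=2 times in the base distribution (so Nr[2]=2) and together account
# for Tr[2]=6 of the N=7 outcomes in the heldout distribution, then every
# sample with base count 2 is assigned Tr[2]/(Nr[2]*N) = 6/(2*7) ~= 0.43:
#
#     hpdist = HeldoutProbDist(base_fdist, heldout_fdist)
#     hpdist.prob('a')    # ~0.43 under the counts assumed above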
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
    the experiment used to generate a set of frequency distributions.
The X{cross-validation estimate} for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
"""
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
C{freqdists}.
@type freqdists: C{list} of C{FreqDist}
@param freqdists: A list of the frequency distributions
generated by the experiment.
@type bins: C{int}
@param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
C{bins} is not specified, it defaults to C{freqdist.B()}.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
@rtype: C{list} of C{FreqDist}
@return: The list of frequency distributions that this
C{ProbDist} is based on.
"""
return self._freqdists
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of events that have only been seen once. The probability mass
reserved for unseen events is equal to:
- M{T / (N + T)}
where M{T} is the number of observed event types and M{N} is the total
number of observed events. This equates to the maximum likelihood estimate
    of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
        - M{p = T / (Z * (N + T))}, if count = 0
- M{p = c / (N + T)}, otherwise
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of events that have only been seen once.
The probability mass reserved for unseen events is equal to:
- M{T / (N + T)}
where M{T} is the number of observed event types and M{N} is the total
number of observed events. This equates to the maximum likelihood
        estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
            - M{p = T / (Z * (N + T))}, if count = 0
- M{p = c / (N + T)}, otherwise
The parameters M{T} and M{N} are taken from the C{freqdist} parameter
(the C{B()} and C{N()} values). The normalising factor M{Z} is
calculated using these values along with the C{bins} parameter.
@param freqdist: The frequency counts upon which to base the
estimation.
@type freqdist: C{FreqDist}
@param bins: The number of possible event types. This must be
at least as large as the number of bins in the
C{freqdist}. If C{None}, then it's assumed to be
equal to that of the C{freqdist}
@type bins: C{Int}
"""
        assert bins is None or bins >= freqdist.B(), \
            'Bins parameter must not be less than freqdist.B()'
        if bins is None:
            bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist.count(sample)
if c == 0:
return self._T / float(self._Z * (self._N + self._T))
else:
return c / float(self._N + self._T)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.samples()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
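# A worked sketch of the Witten-Bell estimate, added for illustration (it
# reuses the made-up 'the dog saw the cat' fdist from the Lidstone sketch, so
# T=4 observed types and N=5 outcomes; with bins=14 there are Z=10 unseen
# bins):
#
#     wb = WittenBellProbDist(fdist, bins=14)
#     wb.prob('the')       # 2/(5+4)       ~= 0.222
#     wb.prob('unseen')    # 4/(10*(5+4))  ~= 0.044 for each unseen type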
class GoodTuringProbDist(ProbDistI):
"""
The Good-Turing estimate of a probability distribution. This method
calculates the probability mass to assign to events with zero or low
counts based on the number of events with higher counts. It does so by
using the smoothed count M{c*}:
- M{c* = (c + 1) N(c + 1) / N(c)}
where M{c} is the original count, M{N(i)} is the number of event types
observed with count M{i}. These smoothed counts are then normalised to
yield a probability distribution.
"""
# TODO - add a cut-off parameter, above which the counts are unmodified
# (see J&M p216)
    def __init__(self, freqdist, bins=None):
"""
Creates a Good-Turing probability distribution estimate. This method
calculates the probability mass to assign to events with zero or low
counts based on the number of events with higher counts. It does so by
using the smoothed count M{c*}:
- M{c* = (c + 1) N(c + 1) / N(c)}
where M{c} is the original count, M{N(i)} is the number of event types
observed with count M{i}. These smoothed counts are then normalised to
yield a probability distribution.
The C{bins} parameter allows C{N(0)} to be estimated.
@param freqdist: The frequency counts upon which to base the
estimation.
@type freqdist: C{FreqDist}
@param bins: The number of possible event types. This must be
at least as large as the number of bins in the
C{freqdist}. If C{None}, then it's taken to be
equal to C{freqdist.B()}.
@type bins: C{Int}
"""
        assert bins is None or bins >= freqdist.B(), \
            'Bins parameter must not be less than freqdist.B()'
        if bins is None:
            bins = freqdist.B()
self._freqdist = freqdist
self._bins = bins
def prob(self, sample):
# inherit docs from FreqDist
c = self._freqdist.count(sample)
nc = self._freqdist.Nr(c, self._bins)
ncn = self._freqdist.Nr(c + 1, self._bins)
# avoid divide-by-zero errors for sparse datasets
if nc == 0 or self._freqdist.N() == 0:
return 0.0
return float(c + 1) * ncn / (nc * self._freqdist.N())
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.samples()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
@rtype: C{string}
@return: A string representation of this C{ProbDist}.
"""
return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N()
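# A worked sketch of the Good-Turing estimate, added for illustration (the
# counts are made up): suppose fdist records a=2, b=1, c=1, d=1, so N=5,
# N(1)=3 and N(2)=1.  A sample seen once is smoothed to (1+1)*N(2)/(N(1)*N)
# = 2/15, and with bins=7 each of the 7-4=3 unseen types gets
# 1*N(1)/(N(0)*N) = 0.2:
#
#     gt = GoodTuringProbDist(fdist, bins=7)
#     gt.prob('b')         # 2/15 ~= 0.133
#     gt.prob('unseen')    # 0.2
#     gt.prob('a')         # 0.0, since N(3)=0 -- see the cut-off TODO in the class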
class MutableProbDist(ProbDistI):
"""
    A mutable probdist where the probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable dictionary and providing an update method.
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
@param prob_dist: the distribution from which to garner the
probabilities
@type prob_dist: ProbDist
@param samples: the complete set of samples
@type samples: sequence of any
@param store_logs: whether to store the probabilities as logarithms
@type store_logs: bool
"""
self._samples = samples
self._sample_dict = dict([(samples[i], i) for i in range(len(samples))])
        # numpy.Float64 was the old Numeric spelling; numpy uses float64.
        self._data = numpy.zeros(len(samples), numpy.float64)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
        if i is not None:
            if self._logs:
                return math.exp(self._data[i])
            else:
                return self._data[i]
else:
return 0.0
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
        if i is not None:
            if self._logs:
                return self._data[i]
            else:
                return math.log(self._data[i])
else:
return float('-inf')
def update(self, sample, prob, log=True):
"""
Update the probability for the given sample. This may cause the object
to stop being the valid probability distribution - the user must
ensure that they update the sample probabilities such that all samples
have probabilities between 0 and 1 and that all probabilities sum to
one.
@param sample: the sample for which to update the probability
@type sample: C{any}
@param prob: the new probability
@type prob: C{float}
@param log: is the probability already logged
@type log: C{bool}
"""
        i = self._sample_dict.get(sample)
        assert i is not None
        if self._logs:
            if log: self._data[i] = prob
            else: self._data[i] = math.log(prob)
        else:
            if log: self._data[i] = math.exp(prob)
            else: self._data[i] = prob
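# A minimal usage sketch of C{MutableProbDist}, added for illustration (it
# assumes the made-up fdist from the sketches above): wrap an existing
# distribution, then overwrite individual sample probabilities, e.g. inside an
# EM re-estimation loop; keeping the values normalized is the caller's job:
#
#     mutable = MutableProbDist(MLEProbDist(fdist), fdist.samples())
#     mutable.update('the', 0.5, log=False)
#     mutable.prob('the')    # 0.5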
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
# Is this right?
return sum([actual_pdist.prob(s) * math.log(test_pdist.prob(s))
for s in actual_pdist.samples()])
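# A quick numeric check of log_likelihood, added for illustration: for a fair
# "actual" distribution and a biased "test" distribution it returns the
# expected log probability (the negative cross-entropy):
#
#     actual = UniformProbDist(['H', 'T'])
#     test = DictionaryProbDist({'H': 0.75, 'T': 0.25})
#     log_likelihood(test, actual)    # 0.5*log(0.75) + 0.5*log(0.25) ~= -0.837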
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
class ConditionalFreqDist(object):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
    occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
C{FreqDist} for the experiment under that condition.
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
<FreqDist with 73 outcomes>
>>> cfdist[3].freq('the')
0.4
>>> cfdist[3].count('dog')
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
C{ConditionalFreqDist} creates a new empty C{FreqDist} for that
condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> cfdist = ConditionalFreqDist()
>>> for word in tokenize.whitespace(sent):
... condition = len(word)
... cfdist[condition].inc(word)
"""
def __init__(self):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
"""
self._fdists = {}
def __getitem__(self, condition):
"""
Return the frequency distribution that encodes the frequency
of each sample outcome, given that the experiment was run
under the given condition. If the frequency distribution for
the given condition has not been accessed before, then this
will create a new empty C{FreqDist} for that condition.
@return: The frequency distribution that encodes the frequency
of each sample outcome, given that the experiment was run
under the given condition.
@rtype: C{FreqDist}
@param condition: The condition under which the experiment was
run.
@type condition: any
"""
# Create the conditioned freq dist, if it doesn't exist
        if condition not in self._fdists:
self._fdists[condition] = FreqDist()
return self._fdists[condition]
def conditions(self):
"""
@return: A list of the conditions that have been accessed for
this C{ConditionalFreqDist}. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
@rtype: C{list}
"""
return self._fdists.keys()
def __repr__(self):
"""
@return: A string representation of this
C{ConditionalFreqDist}.
@rtype: C{string}
"""
n = len(self._fdists)
return '<ConditionalFreqDist with %d conditions>' % n
class ConditionalProbDistI(object):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the C{ProbDist} for the experiment under that
condition.
"""
def __init__(self):
        raise AssertionError('ConditionalProbDistI is an interface')
def __getitem__(self, condition):
"""
@return: The probability distribution for the experiment run
under the given condition.
@rtype: C{ProbDistI}
@param condition: The condition whose probability distribution
should be returned.
@type condition: any
"""
raise AssertionError
def conditions(self):
"""
@return: A list of the conditions that are represented by
this C{ConditionalProbDist}. Use the indexing operator to
access the probability distribution for a given condition.
@rtype: C{list}
"""
raise AssertionError
# For now, this is the only implementation of ConditionalProbDistI;
# but we would want a different implementation if we wanted to build a
# conditional probability distribution analytically (e.g., a gaussian
# distribution), rather than basing it on an underlying frequency
# distribution.
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modelling the experiments
that were used to generate a conditional frequency distribution.
    A C{ConditionalProbDist} is constructed from a
C{ConditionalFreqDist} and a X{C{ProbDist} factory}:
- The B{C{ConditionalFreqDist}} specifies the frequency
distribution for each condition.
- The B{C{ProbDist} factory} is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A C{ProbDist} class's name (such as
C{MLEProbDist} or C{HeldoutProbDist}) can be used to specify
that class's constructor.
The first argument to the C{ProbDist} factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the C{factory_args} parameter to the
C{ConditionalProbDist} constructor. For example, the following
code constructs a C{ConditionalProbDist}, where the probability
distribution for each condition is an C{ELEProbDist} with 10 bins:
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> print cpdist['run'].max()
'NN'
>>> print cpdist['run'].prob('NN')
0.0813
"""
def __init__(self, cfdist, probdist_factory,
supply_condition=False, *factory_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and C{ProbDist}
factory.
@type cfdist: L{ConditionalFreqDist}
@param cfdist: The C{ConditionalFreqDist} specifying the
frequency distribution for each condition.
@type probdist_factory: C{class} or C{function}
@param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument, the condition as its
second argument (only if C{supply_condition=True}), and
C{factory_args} as its remaining arguments.
@type supply_condition: C{bool}
@param supply_condition: If true, then pass the condition as
the second argument to C{probdist_factory}.
@type factory_args: (any)
@param factory_args: Extra arguments for C{probdist_factory}.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
"""
self._probdist_factory = probdist_factory
self._cfdist = cfdist
self._supply_condition = supply_condition
self._factory_args = factory_args
self._pdists = {}
for c in cfdist.conditions():
if supply_condition:
pdist = probdist_factory(cfdist[c], c, *factory_args)
else:
pdist = probdist_factory(cfdist[c], *factory_args)
self._pdists[c] = pdist
def __getitem__(self, condition):
        if condition not in self._pdists:
# If it's a condition we haven't seen, create a new prob
# dist from the empty freq dist. Typically, this will
# give a uniform prob dist.
pdist = self._probdist_factory(FreqDist(), *self._factory_args)
self._pdists[condition] = pdist
return self._pdists[condition]
def conditions(self):
return self._pdists.keys()
def __repr__(self):
"""
@return: A string representation of this
C{ConditionalProbDist}.
@rtype: C{string}
"""
n = len(self._pdists)
return '<ConditionalProbDist with %d conditions>' % n
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
@param probdist_dict: a dictionary containing the probdists indexed
by the conditions
@type probdist_dict: dict any -> probdist
"""
self._dict = probdist_dict
def __getitem__(self, condition):
# inherit documentation
# this will cause an exception for unseen conditions
return self._dict[condition]
def conditions(self):
# inherit documentation
return self._dict.keys()
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30)
def add_logs(logx, logy):
"""
Given two numbers C{logx}=M{log(x)} and C{logy}=M{log(y)}, return
M{log(x+y)}. Conceptually, this is the same as returning
M{log(exp(C{logx})+exp(C{logy}))}, but the actual implementation
avoids overflow errors that could result from direct computation.
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(math.exp(logx-base) + math.exp(logy-base))
def sum_logs(logs):
if len(logs) == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
return _NINF
else:
return reduce(add_logs, logs[1:], logs[0])
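# A quick numeric check of the log-space helpers, added for illustration:
#
#     add_logs(math.log(0.5), math.log(0.25))    # == math.log(0.75)
#     sum_logs([math.log(0.2)] * 5)              # ~= math.log(1.0) == 0.0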
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the C{ProbabilisticMixIn} class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
L{constructor<__init__>} for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. C{prob} should generally be
the first argument for those constructors.
@kwparam prob: The probability associated with the object.
@type prob: C{float}
@kwparam logprob: The log of the probability associated with
the object.
        @type logprob: C{float}
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to C{prob}.
@param prob: The new probability
@type prob: C{float}
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
C{logprob}. I.e., set the probability associated with this
object to C{exp(logprob)}.
@param logprob: The new log probability
@type logprob: C{float}
"""
        self.__logprob = logprob
self.__prob = None
def prob(self):
"""
@return: The probability associated with this object.
@rtype: C{float}
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = math.exp(self.__logprob)
return self.__prob
def logprob(self):
"""
@return: C{log(p)}, where C{p} is the probability associated
with this object.
@rtype: C{float}
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
        raise ValueError('%s is immutable' % self.__class__.__name__)
def set_logprob(self, prob):
        raise ValueError('%s is immutable' % self.__class__.__name__)
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to C{numsamples}, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
from math import sqrt
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1+numsamples)/2) +
random.randint(0, numsamples/2))
fdist.inc(y)
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
C{_create_rand_fdist(numsamples, x)}.
"""
fdist = FreqDist()
for x in range(1, (1+numsamples)/2+1):
for y in range(0, numsamples/2+1):
fdist.inc(x+y)
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
    distributions. This demonstration creates three frequency
    distributions by sampling a random process that can produce
    C{numsamples} different samples; each frequency distribution
    records C{numoutcomes} outcomes. These three frequency distributions are
then used to build six probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
@type numsamples: C{int}
@param numsamples: The number of samples to use in each demo
frequency distributions.
@type numoutcomes: C{int}
@param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
C{numsamples} bins.
@rtype: C{None}
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print ('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes))
print '='*9*(len(pdists)+2)
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
    print FORMATSTR % tuple([repr(pdist)[1:9] for pdist in pdists[:-1]])
print '-'*9*(len(pdists)+2)
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print FORMATSTR % val
# Print the totals for each column (should all be 1.0)
zvals = zip(*vals)
def sum(lst): return reduce(lambda x,y:x+y, lst, 0)
sums = [sum(val) for val in zvals[1:]]
print '-'*9*(len(pdists)+2)
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print FORMATSTR % tuple(sums)
print '='*9*(len(pdists)+2)
# Display the distributions themselves, if they're short enough.
    if len(str(fdist1)) < 70:
print ' fdist1:', str(fdist1)
print ' fdist2:', str(fdist2)
print ' fdist3:', str(fdist3)
print
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
| gpl-3.0 |
roxyboy/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 263 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
boscotsang/BayesDigitClassify | classify_ternary.py | 1 | 2776 | import numpy
from sklearn.metrics import confusion_matrix
def load_data():
train_labels = []
with open('digitdata/traininglabels', 'rb') as f:
for i, line in enumerate(f):
train_labels.append(int(line))
train_labels = numpy.array(train_labels, dtype=int)
train_x = numpy.zeros((train_labels.shape[0] * 28 * 28))
with open('digitdata/trainingimages', 'rb') as f:
for i, line in enumerate(f):
for j, char in enumerate(line.strip('\n')):
if '+' == char:
train_x[i * 28 + j] = 1
if '#' == char:
train_x[i * 28 + j] = 2
train_x = numpy.array(train_x, dtype=int).reshape((train_labels.shape[0], 28 * 28))
test_labels = []
with open('digitdata/testlabels', 'rb') as f:
for i, line in enumerate(f):
test_labels.append(int(line))
test_labels = numpy.array(test_labels, dtype=int)
test_x = numpy.zeros((test_labels.shape[0] * 28 * 28))
with open('digitdata/testimages', 'rb') as f:
for i, line in enumerate(f):
for j, char in enumerate(line.strip('\n')):
if '+' == char:
test_x[i * 28 + j] = 1
if '#' == char:
test_x[i * 28 + j] = 2
test_x = numpy.array(test_x, dtype=int).reshape((test_labels.shape[0], 28 * 28))
return train_x, train_labels, test_x, test_labels
class BayesClassifier(object):
def __init__(self):
self.bayesmatrix = None
def fit(self, X, y):
        # bayesmatrix[k, v, j] will hold P(pixel j takes value v | class k)
        # for the three ternary pixel values v in {0, 1, 2}.
        bayesmatrix = numpy.ones((10, 3, 28 * 28), dtype=numpy.float64)
        for k in xrange(10):
            for i in xrange(3):
                for j in xrange(X.shape[1]):
                    bayesmatrix[k, i, j] = numpy.sum(X[y==k, j]==i)
        numclass = numpy.zeros(10)
        for i in xrange(10):
            numclass[i] = numpy.sum(y==i) + 1
        # Laplace smoothing: add one to every count and to each class total.
        bayesmatrix += 1
        bayesmatrix /= numclass[:, numpy.newaxis, numpy.newaxis]
        self.bayesmatrix = bayesmatrix
def predict(self, X):
labels = []
for i in xrange(X.shape[0]):
label = numpy.argmax(numpy.sum(numpy.log(self.bayesmatrix[:, 0, X[i, :]==0]), axis=1) +
numpy.sum(numpy.log(self.bayesmatrix[:, 1, X[i, :]==1]), axis=1) +
numpy.sum(numpy.log(self.bayesmatrix[:, 2, X[i, :]==2]), axis=1))
labels.append(label)
return numpy.array(labels)
if "__main__" == __name__:
X, y, test_x, test_y = load_data()
clf = BayesClassifier()
clf.fit(X, y)
pr = clf.predict(test_x)
print "Confusion Matrix"
print confusion_matrix(test_y, pr)
print "Accuracy"
print numpy.sum(pr == test_y) / float(test_y.shape[0])
| mit |
ephes/scikit-learn | sklearn/utils/tests/test_class_weight.py | 139 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
fberanizo/neural_network | tests/ibovespa/mlp.py | 1 | 5708 | # -*- coding: utf-8 -*-
import sys, os
sys.path.insert(0, os.path.abspath('../..'))
import unittest, pandas, numpy, datetime, itertools, mlp
from sklearn import cross_validation, preprocessing
class MLP(unittest.TestCase):
"""Test cases for Ibovespa tendency problem."""
grid_search = True
def test_1(self):
"""Tests the accuracy of a MLP using k-folds validation method."""
# Read data from CSV files
X_train, X_test, y_train, y_test = self.read_data()
# Rescales data
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)
X_test = min_max_scaler.fit_transform(X_test)
y_train = min_max_scaler.fit_transform(y_train)
y_test = min_max_scaler.fit_transform(y_test)
n_folds = 5
accuracies = map(lambda x: 0, self.hipergrid())
for idx, hiperparams in enumerate(self.hipergrid()):
skf = cross_validation.StratifiedKFold(y_train.flatten(), n_folds=n_folds)
for fold, (train_index, test_index) in enumerate(skf):
self.progress(((1.0+fold)+n_folds*idx)/(len(self.hipergrid())*n_folds))
X_train2, X_test2 = X_train[train_index], X_train[test_index]
y_train2, y_test2 = y_train[train_index], y_train[test_index]
classifier = mlp.MLP(**hiperparams).fit(X_train2, y_train2)
accuracies[idx] += classifier.score(X_test2, y_test2)
        # Finds which hiperparams give maximum accuracy
        best_hiperparams = self.hipergrid()[accuracies.index(numpy.max(accuracies))]
        # Refit on the full training set with the best hyperparameters before
        # scoring on the held-out test set.
        classifier = mlp.MLP(**best_hiperparams).fit(X_train, y_train)
        accuracy = classifier.score(X_test, y_test)
        print 'Training set accuracy: ' + str(numpy.max(accuracies)/n_folds)
        print 'Test set accuracy: ' + str(accuracy)
        print 'Best hyperparameters: ' + str(best_hiperparams)
def read_data(self):
"""Reads and processes financial data from CSV files"""
ibovespa = "%5EBVSP"
america = ["%5EGSPC", "%5EDJI", "%5EMERV", "%5EMXX", "%5EIXIC", "%5EIPSA"]
europe = ["%5EFTSE", "%5EGDAXI", "%5EFCHI", "FTSEMIB.MI", "%5EIBEX"]
asia = ["%5EN225", "%5EHSI", "%5EBSESN", "%5ESSEC", "%5EJKSE"]
continents = 3
stocks_per_continent = 5
time_window = 7 # 7 days
prediction_range = 1 # 1 day
stocks = america + europe + asia
# Request stock data
# data = {}
# url = "http://ichart.finance.yahoo.com/table.csv?s=STOCK_NAME&g=d&a=0&b=1&c=2016&&ignore=.csv"
# for stock_name in america + europe + asia + [ibovespa]:
# print stock_name
# s = requests.get(url.replace("STOCK_NAME", stock_name)).content
# stock = pandas.read_csv(io.StringIO(s.decode('utf-8'))).set_index("Date")
# stock.to_csv('input/' + stock_name + '.csv')
ibovespa_data = pandas.read_csv('input/' + ibovespa + '.csv', parse_dates=['Date'])
stock_data = pandas.DataFrame(data=[], columns=['Date','Open','High','Low','Close','Volume','Adj Close'])
for stock in stocks:
stock_data = stock_data.append(pandas.read_csv('input/' + stock + '.csv', parse_dates=['Date']))
train = pandas.DataFrame(data=[], columns=['Date', 'Trend']).set_index("Date")
test = pandas.DataFrame(data=[], columns=['Date', 'Trend']).set_index("Date")
for idx, ibovespa_data in ibovespa_data.iterrows():
trend = 0 if ibovespa_data["Close"] < ibovespa_data["Open"] else 1
start_date = ibovespa_data["Date"] + pandas.Timedelta('-1 days')
end_date = ibovespa_data["Date"] + pandas.Timedelta('-1 days')
mask = (stock_data['Date'] >= start_date) & (stock_data['Date'] <= end_date)
stocks = stock_data.loc[mask]['Close'].tolist()
columns = ['Date', 'Trend'] + range(len(stocks))
data = [ibovespa_data["Date"], trend] + stocks
row = pandas.DataFrame([data], columns=columns).set_index("Date")
# Data from last 3 months is test, the rest is train
three_months_ago = pandas.to_datetime('today') + pandas.Timedelta('-90 days')
if ibovespa_data["Date"] < three_months_ago:
train = train.append(row)
else:
test = test.append(row)
# Removes rows with NaN columns
train.dropna(axis=0, how='any', inplace=True)
test.dropna(axis=0, how='any', inplace=True)
X_train = train[train.columns.tolist()[:-1]].as_matrix()
y_train = train[train.columns.tolist()[-1:]].as_matrix()
X_test = test[test.columns.tolist()[:-1]].as_matrix()
y_test = test[test.columns.tolist()[-1:]].as_matrix()
return X_train, X_test, y_train, y_test
def hipergrid(self):
"""Hiperparameters for MLP"""
hidden_layer_size = [{'hidden_layer_size':3},{'hidden_layer_size':5},{'hidden_layer_size':7}]
learning_rate = [{'learning_rate':0.1},{'learning_rate':0.3},{'learning_rate':1}]
grid = []
for hiperparams in itertools.product(hidden_layer_size, learning_rate):
d = {}
for hiperparam in hiperparams:
d.update(hiperparam)
grid.append(d)
return grid
def progress(self, percent):
"""Prints progress in stdout"""
bar_length = 20
hashes = '#' * int(round(percent * bar_length))
spaces = ' ' * (bar_length - len(hashes))
sys.stdout.write("\rPerforming 5-folds grid search: [{0}] {1}%".format(hashes + spaces, int(round(percent * 100))))
sys.stdout.flush()
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
moverlan/LOTlib | LOTlib/Examples/SymbolicRegression/Galileo/Run.py | 1 | 1312 | # -*- coding: utf-8 -*-
"""
This uses Galileo's data on a falling ball.
See: http://www.amstat.org/publications/jse/v3n1/datasets.dickey.html
See also: Jeffreys, W. H., and Berger, J. O. (1992), "Ockham's Razor and Bayesian Analysis," American
Scientist, 80, 64-72 (Erratum, p. 116).
"""
from LOTlib.Hypotheses.GaussianLOTHypothesis import GaussianLOTHypothesis
from LOTlib.FiniteBestSet import FiniteBestSet
from LOTlib.Inference.MetropolisHastings import MHSampler
from LOTlib.Miscellaneous import qq
from Data import data
from Grammar import grammar
from Utilities import make_h0
def run(*args):
"""The running function."""
# starting hypothesis -- here this generates at random
h0 = GaussianLOTHypothesis(grammar)
# We store the top 100 from each run
pq = FiniteBestSet(N=100, max=True, key="posterior_score")
pq.add(MHSampler(h0, data, STEPS, skip=SKIP))
return pq
if __name__ == "__main__":
CHAINS = 10
STEPS = 10000000
SKIP = 0
finitesample = FiniteBestSet(max=True) # the finite sample of all
results = map(run, [ [None] ] * CHAINS ) # Run on a single core
finitesample.merge(results)
## and display
for r in finitesample.get_all(decreasing=False, sorted=True):
print r.posterior_score, r.prior, r.likelihood, qq(str(r))
| gpl-3.0 |
cpausmit/IntelROCCS | Detox/python/siteProperties.py | 3 | 12964 | #====================================================================================================
# C L A S S E S concerning the site description
#====================================================================================================
#---------------------------------------------------------------------------------------------------
"""
Class: SiteProperties(siteName='')
Each site will be fully described for our application in this class.
"""
#---------------------------------------------------------------------------------------------------
import time, math, statistics
class SiteProperties:
"A SiteProperties defines all needed site properties."
def __init__(self, siteName):
self.name = siteName
self.datasetRanks = {}
self.rankSum = 0
self.datasetSizes = {}
self.dsetIsValid = {}
self.dsetIsCustodial = {}
self.dsetLastCopy = {}
self.dsetIsPartial = {}
self.deprecated = {}
self.dsetReqTime = {}
self.dsetUpdTime = {}
self.dsetIsDone = {}
self.dsetNotUsedOnTape = {}
self.wishList = []
self.datasetsToDelete = []
self.protectedList = []
self.siteSizeGbV = 0
self.spaceTakenV = 0
self.spaceNotUsed = 0
self.spaceLCp = 0
self.space2free = 0
self.deleted = 0
self.protected = 0
self.globalDsetIndex = 0
self.epochTime = int(time.time())
def addDataset(self,dset,rank,size,valid,partial,custodial,depr,reqtime,updtime,wasused,isdone):
self.dsetIsValid[dset] = valid
self.dsetIsPartial[dset] = partial
self.dsetIsCustodial[dset] = custodial
self.datasetRanks[dset] = rank
self.datasetSizes[dset] = size
if depr:
self.deprecated[dset] = depr
self.spaceTakenV = self.spaceTakenV + size
self.dsetIsDone[dset] = isdone
self.dsetReqTime[dset] = reqtime
self.dsetUpdTime[dset] = updtime
self.rankSum = self.rankSum + rank*size
if wasused == 0:
self.spaceNotUsed = self.spaceNotUsed + size
def makeWishList(self, dataPropers, ncopyMin, banInvalid=True):
space = 0
self.wishList = []
space2free = self.space2free
addedExtra = 0
counter = 0
for datasetName in sorted(self.datasetRanks.keys(), cmp=self.compare):
counter = counter + 1
if counter < self.globalDsetIndex:
continue
if space > (space2free-self.deleted):
break
if datasetName in self.datasetsToDelete:
continue
if datasetName in self.protectedList:
continue
#custodial set can't be on deletion wish list
if self.dsetIsCustodial[datasetName] :
continue
#if dataPropers[datasetName].daysSinceUsed() > 540:
if dataPropers[datasetName].isFullOnTape():
#delta = (self.epochTime - self.dsetUpdTime[datasetName])/(60*60*24)
if dataPropers[datasetName].getGlobalRank() > 500:
#if delta > 500:
space = space + self.datasetSizes[datasetName]
self.wishList.append(datasetName)
dataPropers[datasetName].kickFromPool = True
print "exp at " + self.name + ": " + datasetName
#print datasetName
#addedExtra = addedExtra + 1
continue
if "/RECO" in datasetName:
delta = (self.epochTime - self.dsetUpdTime[datasetName])/(60*60*24)
#if dataPropers[datasetName].daysSinceUsed() > 180 and delta>180:
if delta > 180:
space = space + self.datasetSizes[datasetName]
self.wishList.append(datasetName)
dataPropers[datasetName].kickFromPool = True
print "RECO " + self.name + ": " + datasetName
continue
else:
continue
#non-valid dataset can't be on deletion list
if banInvalid == True:
if not self.dsetIsValid[datasetName]:
continue
dataPr = dataPropers[datasetName]
if dataPr.nSites() > ncopyMin:
space = space + self.datasetSizes[datasetName]
self.wishList.append(datasetName)
self.globalDsetIndex = counter
def hasMoreToDelete(self, dataPropers, ncopyMin, banInvalid):
counter = 0
if self.globalDsetIndex >= len(self.datasetRanks.keys()):
return False
for datasetName in sorted(self.datasetRanks.keys(), cmp=self.compare):
counter = counter + 1
if counter < self.globalDsetIndex:
continue
if '/MINIAOD' in datasetName:
ncopyMinTemp = 3
else:
ncopyMinTemp = ncopyMin
if datasetName in self.datasetsToDelete:
continue
if datasetName in self.protectedList:
continue
#custodial set can't be on deletion wish list
if self.dsetIsCustodial[datasetName] :
continue
#non-valid dataset can't be on deletion list
if banInvalid == True:
if not self.dsetIsValid[datasetName]:
continue
if datasetName in self.wishList:
continue
dataPr = dataPropers[datasetName]
if dataPr.nSites() <= ncopyMinTemp:
continue
return True
return False
def onWishList(self,dset):
if dset in self.wishList:
return True
return False
def onProtectedList(self,dset):
if dset in self.protectedList:
return True
return False
def wantToDelete(self):
if self.deleted < self.space2free:
return True
else:
return False
def grantWish(self,dset):
if dset in self.protectedList:
return False
if dset in self.datasetsToDelete:
return False
#if self.deleted > self.space2free:
# return False
self.datasetsToDelete.append(dset)
self.deleted = self.deleted + self.datasetSizes[dset]
return True
def revokeWish(self,dset):
if dset in self.datasetsToDelete:
self.datasetsToDelete.remove(dset)
self.deleted = self.deleted - self.datasetSizes[dset]
def canBeLastCopy(self,dset,banInvalid):
if not banInvalid:
return True
#can't be partial dataset
if dset not in self.dsetIsPartial:
return False
if self.dsetIsPartial[dset] :
return False
#can't be non-valid dataset
if not self.dsetIsValid[dset]:
return False
return True
def pinDataset(self,dset):
if dset in self.datasetsToDelete:
return False
#can't pin partial dataset
if self.dsetIsPartial[dset] :
return False
#can't pin non-valid dataset
if not self.dsetIsValid[dset]:
return False
self.protectedList.append(dset)
self.protected = self.protected + self.datasetSizes[dset]
if dset in self.wishList:
self.wishList.remove(dset)
return True
def lastCopySpace(self,datasets,nCopyMin):
space = 0
self.dsetLastCopy = {}
for dset in self.datasetSizes.keys():
if dset in self.datasetsToDelete:
continue
dataset = datasets[dset]
remaining = dataset.nSites() - dataset.nBeDeleted()
if remaining <= nCopyMin:
self.dsetLastCopy[dset] = 1
space = space + self.datasetSizes[dset]
self.spaceLCp = space
def setSiteSize(self,size):
self.siteSizeGbV = size
def siteSizeGb(self):
return self.siteSizeGbV
def dsetRank(self,set):
return self.datasetRanks[set]
def dsetSize(self,set):
return self.datasetSizes[set]
def isPartial(self,set):
return self.dsetIsPartial[set]
def siteName(self):
return self.name
def spaceTaken(self):
return self.spaceTakenV
def spaceDeleted(self):
return self.deleted
def spaceProtected(self):
return self.protected
def spaceFree(self):
return self.siteSizeGbV - (self.spaceTakenV - self.deleted)
def spaceLastCp(self):
return self.spaceLCp
def isDeprecated(self,dset):
if dset in self.deprecated:
return True
return False
def spaceDeprecated(self):
size = 0
for dset in self.deprecated:
size = size + self.datasetSizes[dset]
return size
def spaceIncomplete(self):
size = 0;
for dset in self.dsetIsPartial:
if self.dsetIsPartial[dset]:
size = size + self.datasetSizes[dset]
return size
def spaceCustodial(self):
size = 0;
for dset in self.dsetIsCustodial:
if self.dsetIsCustodial[dset]:
size = size + self.datasetSizes[dset]
return size
def spaceUtouchable(self):
size = 0
for dset in self.dsetLastCopy:
size = size + self.datasetSizes[dset]
for dset in self.dsetIsCustodial:
if dset in self.dsetLastCopy:
continue
if self.dsetIsCustodial[dset]:
size = size + self.datasetSizes[dset]
return size
def nsetsDeprecated(self):
nsets = 0
for dset in self.deprecated:
nsets = nsets + 1
return nsets
def hasDataset(self,dset):
if dset in self.datasetRanks:
return True
else:
return False
def willDelete(self,dset):
if dset in self.datasetsToDelete:
return True
else:
return False
def allSets(self):
return sorted(self.datasetRanks.keys(), cmp=self.compare)
def delTargets(self):
return sorted(self.datasetsToDelete, cmp=self.compare)
def protectedSets(self):
return sorted(self.protectedList, cmp=self.compare)
def setSpaceToFree(self,size):
self.space2free = size
def reqTime(self,dset):
return self.dsetReqTime[dset]
def dsetLoadTime(self,dset):
return (self.dsetUpdTime[dset] - self.dsetReqTime[dset])
def spaceUnused(self):
return self.spaceNotUsed
def siteRank(self):
if self.spaceTakenV == 0:
return 0
return self.rankSum/self.spaceTakenV
def medianRank(self):
if len(self.datasetRanks.values()) > 0:
return statistics.median(self.datasetRanks.values())
return 0
def dsetIsStuck(self,dset):
if self.dsetIsDone[dset] == 0:
reqtime = self.dsetReqTime[dset]
if (self.epochTime - reqtime) > 60*60*24*14:
return 1
return 0
def considerForStats(self,dset):
if self.dsetLoadTime(dset) > 60*60*24*14:
return False
if self.dsetLoadTime(dset) <= 0:
return False
if (self.epochTime - self.dsetReqTime[dset]) > 60*60*24*90:
return False
return True
def getDownloadStats(self):
loadSize = 0
loadTime = 0
stuck = 0
for dset in self.datasetSizes:
if self.dsetIsStuck(dset) == 1:
stuck = stuck + 1
continue
if not self.considerForStats(dset):
continue
if self.datasetSizes[dset] > 10:
loadSize = loadSize + self.datasetSizes[dset]
loadTime = loadTime + self.dsetLoadTime(dset)
speed = 0
if loadTime > 0:
speed = loadSize/loadTime*(60*60*24)
return (speed, loadSize, stuck)
def getAverage(self,array):
if len(array) < 3: return 0
sortA = sorted(array)
diff = 100
prevMean = sortA[len(sortA)/2]
prevRms = sortA[len(sortA)-1] - sortA[0]
print sortA
while diff > 0.01:
ave = 0
aveSq = 0
nit = 0
for i in range(1, len(sortA)):
if abs(sortA[i] - prevMean) > 1.6*prevRms:
continue
ave = ave + sortA[i]
aveSq = aveSq + sortA[i]*sortA[i]
nit = nit + 1
ave = ave/nit
rms = math.sqrt(aveSq/nit - ave*ave)
diff = abs(ave - prevMean)/prevMean
prevMean = ave
prevRms = rms
return prevMean
def compare(self,item1, item2):
r1 = self.datasetRanks[item1]
r2 = self.datasetRanks[item2]
if r1 < r2:
return 1
elif r1 > r2:
return -1
else:
return 0
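if __name__ == '__main__':
    # Minimal usage sketch (illustration only, not part of the Detox workflow):
    # the dataset names, ranks and sizes below are made-up placeholders.
    now = int(time.time())
    site = SiteProperties("T2_EXAMPLE_Site")
    # addDataset(dset, rank, size, valid, partial, custodial, depr, reqtime, updtime, wasused, isdone)
    site.addDataset("/Example/DatasetA/AOD", 10.0, 500.0, True, False, False, False, now - 86400, now, 1, 1)
    site.addDataset("/Example/DatasetB/AOD", 40.0, 250.0, True, False, False, False, now - 86400, now, 0, 1)
    site.setSiteSize(10000)
    print "space taken    : " + str(site.spaceTaken())
    print "space not used : " + str(site.spaceUnused())
    print "site rank      : " + str(site.siteRank())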
| mit |
google-research/selfstudy-adversarial-robustness | defense_randomneuron/model.py | 1 | 2860 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model of the defense 0."""
import numpy as np
import tensorflow as tf
from common.framework import DefenseModel, get_checkpoint_abs_path
from common.networks import AllConvModel, AllConvModelTorch
import common.utils as utils
MODEL_PATH = 'checkpoints/baseline/final_checkpoint-1'
class RandomDropModel(AllConvModel):
def __call__(self, x, training=False):
del training
for layer in self.layers:
x = layer(x)
if isinstance(layer, tf.keras.layers.Conv2D):
_,a,b,c = x.shape
p = tf.abs(x)/tf.reduce_sum(tf.abs(x), axis=(1,2,3), keepdims=True)
p_keep = 1-tf.exp(-a*b*c / 3 * p)
keep = tf.random.uniform(p_keep.shape)<p_keep
x = tf.cast(keep, tf.float32)*x/p_keep
return x
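# Restated with numpy for clarity (a sketch only; the defense itself uses the TF
# and Torch implementations in this module): activations holding a larger share
# of the layer's total |activation| mass are kept with probability close to one,
# and surviving activations are rescaled by 1/p_keep so the layer output is
# unbiased in expectation, as in inverted dropout.
def _keep_probability_sketch(x):
    """x: numpy array of shape (batch, a, b, c); returns (p_keep, dropped-and-rescaled x)."""
    _, a, b, c = x.shape
    p = np.abs(x) / np.sum(np.abs(x), axis=(1, 2, 3), keepdims=True)
    p_keep = 1 - np.exp(-a * b * c / 3 * p)
    keep = np.random.uniform(size=p_keep.shape) < p_keep
    return p_keep, keep.astype(np.float32) * x / p_keep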
class Defense(DefenseModel):
def __init__(self):
self.convnet = RandomDropModel(num_classes=10,
num_filters=64,
input_shape=[32, 32, 3])
tf.train.Checkpoint(model=self.convnet).restore(
get_checkpoint_abs_path(MODEL_PATH))
self.to_tensor = lambda x: x
def classify(self, x):
preds = [utils.to_numpy(self.convnet(self.to_tensor(x))) for _ in range(10)]
return np.mean(preds, axis=0)
class RandomDropModelTorch(AllConvModelTorch):
def __call__(self, x, training=False):
import torch
del training
for layer in self.layers:
x = layer(x)
if isinstance(layer, torch.nn.Conv2d):
_,a,b,c = x.shape
p = torch.abs(x)/torch.sum(torch.abs(x), axis=(1,2,3), keepdims=True)
p_keep = 1-torch.exp(-a*b*c / 3 * p)
keep = torch.rand(p_keep.shape)<p_keep
x = keep.float()*x/p_keep
return x
class DefenseTorch(Defense):
def __init__(self):
import torch
self.convnet = RandomDropModelTorch(num_classes=10,
num_filters=64,
input_shape=[3, 32, 32])
self.convnet.load_state_dict(
torch.load(get_checkpoint_abs_path(MODEL_PATH) + ".torchmodel"))
self.to_tensor = torch.tensor
| apache-2.0 |
roxyboy/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 302 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
JustinNoel1/ML-Course | linear-regression/python/linreg.py | 1 | 1895 |
from sklearn.linear_model import LinearRegression
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.metrics import mean_squared_error
# Fix the number of samples and our seed
NUM_SAMPLES = 200
np.random.seed(42)
# Our "true function"
def f(x):
return 1.5*x + 0.5
#Construct array of (x,f(x))-pairs where x is sampled randomly from unit interval
data = np.array([[x,f(x) ] for x in np.random.random(NUM_SAMPLES)])
# Create regular grid of x values and the values of f
gridx = np.linspace(0, 1, 200)
gridy = np.array([f(x) for x in gridx])
# Add Gaussian noise with sigma=0.6
normaly = data[:,1]+0.6*np.random.randn(NUM_SAMPLES)
#Plot the messy data
plt.scatter(data[:,0], normaly )
plt.title("Scatter plot of synthetic data with normal errors")
#Plot the true function
plt.plot(gridx, gridy, label = "True function", color = 'Red')
plt.legend(loc = 2)
# Save and clear
plt.savefig("scatter_normal.png")
plt.cla()
# Fit linear regressors to increasingly large intervals of data
lm = LinearRegression()
for i in range(1, NUM_SAMPLES+1):
# Fit the regressor
lm.fit(data[:i,0].reshape((i,1)), normaly[:i].reshape((i,1)))
# Get the predictions on all of the sample points
predictions = lm.predict(data[:,0].reshape(NUM_SAMPLES,1))
# Get MSE
mse = mean_squared_error(predictions, normaly)
# Plot the messy data
plt.scatter(data[:,0], normaly)
plt.title("Linear regression on {} points with normal error".format(i))
# Plot the true function
plt.plot(gridx, gridy, label = "True function", color = 'Red')
# Plot the regression line
plt.plot(gridx, [lm.coef_[0] * x + lm.intercept_[0] for x in gridx], label = "Linear regressor line MSE = {:0.4f}".format(mse), color = 'Green')
plt.legend(loc = 2)
# Save and clear
plt.savefig("linreg_normal_{:03d}.png".format(i))
plt.cla()
| apache-2.0 |
SKA-INAF/caesar | scripts/skymodel_generator.py | 1 | 65709 | #!/usr/bin/env python
##################################################
### MODULE IMPORT
##################################################
## STANDARD MODULES
import os
import sys
import subprocess
import string
import time
import signal
from threading import Thread
import datetime
import numpy as np
import random
import math
import numbers
##from ctypes import *
## ASTRO
from scipy import ndimage
##import pyfits
from astropy.io import fits
from astropy.units import Quantity, UnitsError
from astropy.modeling.parameters import Parameter
from astropy.modeling.core import Fittable2DModel
from astropy.modeling.models import Box2D, Gaussian2D, Ring2D, Ellipse2D, TrapezoidDisk2D, Disk2D, AiryDisk2D, Sersic2D
#from photutils.datasets import make_noise_image
from astropy import wcs
## ROOT
import ROOT
from ROOT import gSystem, TFile, TTree, gROOT, AddressOf
## CAESAR
gSystem.Load('libCaesar')
from ROOT import Caesar
## COMMAND-LINE ARG MODULES
import getopt
import argparse
import collections
## Graphics modules
import matplotlib.pyplot as plt
import pylab
## LOGGER
import logging
import logging.config
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)-15s %(levelname)s - %(message)s",datefmt='%Y-%m-%d %H:%M:%S')
logger= logging.getLogger(__name__)
logger.setLevel(logging.INFO)
##################################################
#### GET SCRIPT ARGS ####
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_args():
"""This function parses and return arguments passed in"""
parser = argparse.ArgumentParser(description="Parse args.")
# - GENERAL IMAGE OPTIONS
parser.add_argument('-nx', '--nx', dest='nx', required=True, type=int, action='store',help='Image width in pixels')
parser.add_argument('-ny', '--ny', dest='ny', required=True, type=int, action='store',help='Image height in pixels')
parser.add_argument('-marginx', '--marginx', dest='marginx', required=False, type=int, default=0,action='store',help='Image x margin in pixels')
parser.add_argument('-marginy', '--marginy', dest='marginy', required=False, type=int, default=0,action='store',help='Image y margin in pixels')
parser.add_argument('-pixsize', '--pixsize', dest='pixsize', required=True, type=float, action='store',help='Map pixel size in arcsec')
parser.add_argument('-bmaj', '--bmaj', dest='bmaj', required=True, type=float, default=10, action='store',help='Beam bmaj in arcsec (default=5)')
parser.add_argument('-bmin', '--bmin', dest='bmin', required=True, type=float, default=5, action='store',help='Beam bmin in arcsec (default=5)')
parser.add_argument('-bpa', '--bpa', dest='bpa', required=False, type=float, default=0, action='store',help='Beam bpa in deg (default=0)')
parser.add_argument('-crpix1', '--crpix1', dest='crpix1', required=False, type=float, default=1, action='store',help='CRPIX1 fits keyword (default=1)')
parser.add_argument('-crpix2', '--crpix2', dest='crpix2', required=False, type=float, default=1, action='store',help='CRPIX2 fits keyword (default=1)')
parser.add_argument('-crval1', '--crval1', dest='crval1', required=False, type=float, default=254.851041667, action='store',help='CRVAL1 fits keyword (default=1)')
parser.add_argument('-crval2', '--crval2', dest='crval2', required=False, type=float, default=-41.4765888889, action='store',help='CRVAL2 fits keyword (default=1)')
#parser.add_argument('-ctype1', '--ctype1', dest='ctype1', required=False, type=str, default='RA---NCP', action='store',help='CTYPE1 fits keyword (default=1)')
#parser.add_argument('-ctype2', '--ctype2', dest='ctype2', required=False, type=str, default='DEC--NCP', action='store',help='CTYPE2 fits keyword (default=1)')
parser.add_argument('-ctype1', '--ctype1', dest='ctype1', required=False, type=str, default='RA---SIN', action='store',help='CTYPE1 fits keyword (default=1)')
parser.add_argument('-ctype2', '--ctype2', dest='ctype2', required=False, type=str, default='DEC--SIN', action='store',help='CTYPE2 fits keyword (default=1)')
parser.add_argument('-maskimg', '--maskimg', dest='maskimg', required=False, type=str, default='', action='store',help='FITS image used as mask (not generating sources if mask pixel!=0)')
# - BKG OPTIONS
parser.add_argument('--bkg', dest='enable_bkg', action='store_true')
parser.add_argument('--no-bkg', dest='enable_bkg', action='store_false')
parser.set_defaults(enable_bkg=True)
parser.add_argument('-bkg_level', '--bkg_level', dest='bkg_level', required=False, type=float, default=10e-6, action='store',help='Bkg level (default=0)')
parser.add_argument('-bkg_rms', '--bkg_rms', dest='bkg_rms', required=False, type=float, default=100e-6, action='store',help='Bkg rms (default=0)')
# - COMPACT SOURCE OPTIONS
parser.add_argument('-npixels_min', '--npixels_min', dest='npixels_min', required=False, type=int, default=5, action='store',help='Minimum number of pixels for a generated source (default=5)')
parser.add_argument('--compactsources', dest='enable_compactsources', action='store_true')
parser.add_argument('--no-compactsources', dest='enable_compactsources', action='store_false')
parser.set_defaults(enable_compactsources=True)
parser.add_argument('-nsources', '--nsources', dest='nsources', required=False, type=int, default=0, action='store',help='Compact source number (if >0 overrides the density generation) (default=0)')
parser.add_argument('-nx_gen', '--nx_gen', dest='nx_gen', required=False, type=int, default=501, action='store',help='Blob image width in pixels')
parser.add_argument('-ny_gen', '--ny_gen', dest='ny_gen', required=False, type=int, default=501, action='store',help='Blob image height in pixels')
parser.add_argument('-zmin', '--zmin', dest='zmin', required=False, type=float, default=1, action='store',help='Minimum source significance level in sigmas above the bkg (default=1)')
parser.add_argument('-zmax', '--zmax', dest='zmax', required=False, type=float, default=30, action='store',help='Maximum source significance level in sigmas above the bkg (default=30)')
parser.add_argument('-source_density', '--source_density', dest='source_density', required=False, type=float, default=1000, action='store',help='Compact source density (default=1000)')
parser.add_argument('-bmaj_min', '--bmaj_min', dest='bmaj_min', required=False, type=float, default=4, action='store',help='Gaussian components min bmaj in arcsec (default=4)')
parser.add_argument('-bmaj_max', '--bmaj_max', dest='bmaj_max', required=False, type=float, default=10, action='store',help='Gaussian components max bmaj in arcsec (default=10)')
parser.add_argument('-bmin_min', '--bmin_min', dest='bmin_min', required=False, type=float, default=4, action='store',help='Gaussian components min bmin in arcsec (default=4)')
parser.add_argument('-bmin_max', '--bmin_max', dest='bmin_max', required=False, type=float, default=10, action='store',help='Gaussian components max bmin in arcsec (default=10)')
    parser.add_argument('-pa_min', '--pa_min', dest='pa_min', required=False, type=float, default=-90, action='store',help='Gaussian components min position angle in deg (default=-90)')
    parser.add_argument('-pa_max', '--pa_max', dest='pa_max', required=False, type=float, default=90, action='store',help='Gaussian components max position angle in deg (default=90)')
parser.add_argument('-Smin', '--Smin', dest='Smin', required=False, type=float, default=1.e-6, action='store',help='Minimum source flux in Jy (default=1.e-6)')
parser.add_argument('-Smax', '--Smax', dest='Smax', required=False, type=float, default=1, action='store',help='Maximum source flux in Jy (default=1)')
parser.add_argument('-Smodel', '--Smodel', dest='Smodel', required=False, type=str, default='uniform', action='store',help='Source flux generation model (default=uniform)')
parser.add_argument('-Sslope', '--Sslope', dest='Sslope', required=False, type=float, default=1.6, action='store',help='Slope par in expo source flux generation model (default=1.6)')
# - EXTENDED SOURCES
parser.add_argument('--extsources', dest='enable_extsources', action='store_true')
parser.add_argument('--no-extsources', dest='enable_extsources', action='store_false')
parser.set_defaults(enable_extsources=True)
parser.add_argument('-ext_nsources', '--ext_nsources', dest='ext_nsources', required=False, type=int, default=0, action='store',help='Extended source number (if >0 overrides the density generation) (default=0)')
parser.add_argument('-ext_source_density', '--ext_source_density', dest='ext_source_density', required=False, type=float, default=100, action='store',help='Extended source density (default=1000)')
parser.add_argument('-Smin_ext', '--Smin_ext', dest='Smin_ext', required=False, type=float, default=1.e-6, action='store',help='Minimum extended source flux in Jy (default=1.e-6)')
parser.add_argument('-Smax_ext', '--Smax_ext', dest='Smax_ext', required=False, type=float, default=1, action='store',help='Maximum extended source flux in Jy (default=1)')
parser.add_argument('-zmin_ext', '--zmin_ext', dest='zmin_ext', required=False, type=float, default=0.1, action='store',help='Minimum extended source significance level in sigmas above the bkg (default=0.1)')
parser.add_argument('-zmax_ext', '--zmax_ext', dest='zmax_ext', required=False, type=float, default=2, action='store',help='Maximum extended source significance level in sigmas above the bkg (default=2)')
parser.add_argument('-ext_scale_min', '--ext_scale_min', dest='ext_scale_min', required=False, type=float, default=10, action='store',help='Minimum extended source size in arcsec (default=10)')
parser.add_argument('-ext_scale_max', '--ext_scale_max', dest='ext_scale_max', required=False, type=float, default=3600, action='store',help='Maximum extended source size in arcsec (default=3600)')
parser.add_argument('-ext_source_type', '--ext_source_type', dest='ext_source_type', required=False, type=int, default=-1, action='store',help='Extended source type to generate (-1=all types from available models, 1=ring, 2=ellipse, 3=bubble+shell, 4=airy disk (default=-1)')
# - SOURCE MODEL OPTIONS
    parser.add_argument('-ring_rmin', '--ring_rmin', dest='ring_rmin', required=False, type=float, default=0.5, action='store',help='Minimum ring radius in arcsec (default=0.5)')
    parser.add_argument('-ring_rmax', '--ring_rmax', dest='ring_rmax', required=False, type=float, default=10, action='store',help='Maximum ring radius in arcsec (default=10)')
    parser.add_argument('-ring_wmin', '--ring_wmin', dest='ring_wmin', required=False, type=float, default=5, action='store',help='Minimum ring width in arcsec (default=5)')
    parser.add_argument('-ring_wmax', '--ring_wmax', dest='ring_wmax', required=False, type=float, default=20, action='store',help='Maximum ring width in arcsec (default=20)')
    parser.add_argument('-ellipse_rmin', '--ellipse_rmin', dest='ellipse_rmin', required=False, type=float, default=0.5, action='store',help='Minimum ellipse semi-axis in arcsec (default=0.5)')
    parser.add_argument('-ellipse_rmax', '--ellipse_rmax', dest='ellipse_rmax', required=False, type=float, default=10, action='store',help='Maximum ellipse semi-axis in arcsec (default=10)')
parser.add_argument('-disk_shell_ampl_ratio_min', '--disk_shell_ampl_ratio_min', dest='disk_shell_ampl_ratio_min', required=False, type=float, default=0.1, action='store',help='Disk/shell amplitude ratio min (default=0.1)')
parser.add_argument('-disk_shell_ampl_ratio_max', '--disk_shell_ampl_ratio_max', dest='disk_shell_ampl_ratio_max', required=False, type=float, default=0.8, action='store',help='Disk/shell amplitude ratio max (default=0.8)')
parser.add_argument('-disk_shell_radius_ratio_min', '--disk_shell_radius_ratio_min', dest='disk_shell_radius_ratio_min', required=False, type=float, default=0.6, action='store',help='Disk/shell radius ratio min (default=0.6)')
parser.add_argument('-disk_shell_radius_ratio_max', '--disk_shell_radius_ratio_max', dest='disk_shell_radius_ratio_max', required=False, type=float, default=0.9, action='store',help='Disk/shell radius ratio max (default=0.8)')
parser.add_argument('-zmin_model', '--zmin_model', dest='model_trunc_zmin', required=False, type=float, default=1, action='store',help='Minimum source significance level in sigmas above the bkg below which source data are set to 0 (default=1)')
parser.add_argument('-mask_boxsize', '--mask_boxsize', dest='mask_boxsize', required=False, type=float, default=10, action='store',help='Mask box size in pixels (default=10)')
parser.add_argument('-trunc_thr', '--trunc_thr', dest='trunc_thr', required=False, type=float, default=0.01, action='store',help='Source model truncation thr (default=0.01)')
parser.add_argument('--truncmodels', dest='truncate_models', action='store_true')
parser.add_argument('--no-truncmodels', dest='truncate_models', action='store_false')
parser.set_defaults(truncate_models=True)
# - OUTPUT FILE OPTIONS
parser.add_argument('-outputfile', '--outputfile', dest='outputfile', required=False, type=str, default='simmap.fits',action='store',help='Output filename')
parser.add_argument('-outputfile_model', '--outputfile_model', dest='outputfile_model', required=False, type=str, default='skymodel.fits', action='store',help='Model filename')
parser.add_argument('-outputfile_sources', '--outputfile_sources', dest='outputfile_sources', required=False, type=str, default='sources.root',action='store',help='Skymodel source ROOT Output filename')
parser.add_argument('-outputfile_ds9region', '--outputfile_ds9region', dest='outputfile_ds9region', required=False, type=str, default='dsregion.reg',action='store',help='DS9 source region filename')
parser.add_argument('-outputfile_casaregion', '--outputfile_casaregion', dest='outputfile_casaregion', required=False, type=str, default='casa_mask.dat',action='store',help='CASA source region filename')
args = parser.parse_args()
return args
###########################
## MODELS
###########################
class RingSector2D(Fittable2DModel):
""" Two dimensional radial symmetric Ring model """
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
r_in = Parameter(default=1)
width = Parameter(default=1)
theta_min = Parameter(default=-np.pi)
theta_max = Parameter(default=np.pi)
def __init__(self, amplitude=amplitude.default, x_0=x_0.default, y_0=y_0.default, r_in=r_in.default, width=width.default, theta_min=theta_min.default, theta_max=theta_max.default, **kwargs):
# If outer radius explicitly given, it overrides default width.
if width is None:
width = self.width.default
if theta_min is None:
theta_min = self.theta_min.default
if theta_max is None:
theta_max = self.theta_max.default
super(RingSector2D, self).__init__(amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, theta_min=theta_min, theta_max=theta_max, **kwargs)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width, theta_min, theta_max):
"""Two dimensional Ring sector model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
theta = np.arctan2(x-x_0,y-y_0)
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
theta_range= np.logical_and(theta>=theta_min, theta<=theta_max)
sector_range = np.logical_and(r_range,theta_range)
result = np.select([sector_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ( (self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr) )
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit, 'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
        return collections.OrderedDict([('x_0', inputs_unit['x']), ('y_0', inputs_unit['x']), ('r_in', inputs_unit['x']), ('width', inputs_unit['x']), ('amplitude', outputs_unit['z'])])
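# Quick evaluation sketch for the ring-sector model above (illustration only, not
# called by the simulator): sample the model on a small pixel grid and count the
# pixels falling inside the half-ring sector.
def _ring_sector_demo(npix=50):
    yy, xx = np.mgrid[0:npix, 0:npix]
    model = RingSector2D(amplitude=1.0, x_0=npix/2., y_0=npix/2.,
                         r_in=10., width=5., theta_min=0., theta_max=np.pi)
    data = model(xx, yy)
    return data, np.count_nonzero(data)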
###########################
## SIMULATOR CLASS
###########################
SIGMA_TO_FWHM= np.sqrt(8*np.log(2))
class SkyMapSimulator(object):
""" Sky map simulator class
Attributes:
nx: image width in pixels
ny: image height in pixels
pixsize: pixel size in arcsec (default=1)
"""
def __init__(self, nx, ny, pixsize=1):
""" Return a SkyMapGenerator object """
## Image parameters
self.nx = nx #in pixels
self.ny = ny # in pixels
self.marginx= 0 # in pixels (no margin)
self.marginy= 0 # in pixels (no margin)
self.pixsize= pixsize # in arcsec
self.gridy, self.gridx = np.mgrid[0:ny, 0:nx]
self.crpix1= 1
self.crpix2= 1
self.crval1= 254.851041667
self.crval2= -41.4765888889
self.ctype1= 'RA---SIN'
self.ctype2= 'DEC--SIN'
self.gmask_data= None
## Source model
self.truncate_models= True
self.trunc_thr= 0.01 # 1% flux truncation at maximum
self.trunc_model_zmin= 1
## Mask box size
self.mask_boxsize= 10 # in pixels
## Bkg parameters
self.simulate_bkg= True
self.bkg_level= 0 # in Jy
self.bkg_rms= 10.e-6 # in Jy
## Compact source parameters
self.simulate_compact_sources= True
#self.nx_gen= 1001
#self.ny_gen= 1001
self.nx_gen= 501
self.ny_gen= 501
self.gridy_gen, self.gridx_gen = np.mgrid[0:self.ny_gen, 0:self.nx_gen]
self.ps_list= []
self.nsources= 0 # default is density generator
self.source_density= 2000. # in sources/deg^2
self.beam_bmaj= 6.5 # in arcsec
self.beam_bmin= 6.5 # in arcsec
self.beam_bpa= 0 # in deg
self.beam_area= self.compute_beam_area(self.beam_bmaj,self.beam_bmin) # in pixels
self.zmin= 1 # in sigmas
self.zmax= 30 # in sigmas
self.npixels_min= 5
self.beam_bpa_min= -90 # deg
self.beam_bpa_max= 90 # deg
self.beam_bmaj_min= 4 # arcsec
self.beam_bmaj_max= 10 # arcsec
self.beam_bmin_min= 4 # arcsec
self.beam_bmin_max= 10 # arcsec
self.Smin= 1.e-6 # in Jy
self.Smax= 1 # in Jy
self.Smodel= 'uniform'
self.Sslope= 1.6
## Extended source parameters
self.simulate_ext_sources= True
self.ext_nsources= 0 # default is density generator
self.ext_source_type= -1 # all source models generated
self.ext_source_density= 10 # in sources/deg^2
self.Smin_ext= 1.e-6 # in Jy
self.Smax_ext= 1 # in Jy
self.zmin_ext= 0.5 # in sigmas
self.zmax_ext= 5 # in sigmas
self.ring_rmin= 2. # in arcsec
self.ring_rmax= 10. # in arcsec
self.ring_width_min= 5 # in arcsec
self.ring_width_max= 10 # in arcsec
self.ellipse_rmin= 1 # in arcsec
self.ellipse_rmax= 10 # in arcsec
self.ellipse_rratiomin= 0.7 # ratio rmin/rmax
self.disk_rmin= 2 # in arcsec
self.disk_rmax= 10 # in arcsec
self.shell_disk_ampl_ratio_min= 0.1
self.shell_disk_ampl_ratio_max= 0.8
self.shell_disk_radius_ratio_min= 0.6
self.shell_disk_radius_ratio_max= 0.9
self.sersic_radius= 10 # in arcsec
self.sersic_ellipticity= 0.5
self.sersic_index= 4
## Map output file
self.mapfilename= 'simmap.fits'
self.modelfilename= 'skymodel.fits'
## Ascii output file
self.source_par_outfile= 'point_sources.dat'
## DS9 output file
self.ds9filename= 'ds9region.reg'
## CASA region output file
self.casafilename= 'casamask.dat'
## Caesar img & sources
self.outfilename= 'SimOutput.root'
self.outfile= None
self.outtree= None
self.cs = None
self.caesar_sources= []
self.caesar_img= None
def init(self):
""" Initialize data """
## Initialize output tree & file
self.outfile= ROOT.TFile(self.outfilename,'RECREATE')
self.outtree= ROOT.TTree('SourceInfo','SourceInfo')
self.cs = Caesar.Source()
self.outtree.Branch('Source',self.cs)
def set_mask_box_size(self,boxsize):
""" Set mask box size """
if boxsize<=0:
            raise ValueError('Invalid boxsize specified (shall be larger than 0)')
self.mask_boxsize= boxsize
def set_margins(self,marginx,marginy):
""" Set margin in X & Y """
if (marginx<0 or marginy<0 or marginx>=self.nx/2 or marginy>=self.ny/2) :
            raise ValueError('Invalid margin specified (<0 or larger than image half size)!')
self.marginx= marginx
self.marginy= marginy
def set_ref_pix(self,x,y):
""" Set reference pixel (CRPIX1,CRPIX2) in FITS output """
self.crpix1= x
self.crpix2= y
def set_ref_pix_coords(self,x,y):
""" Set reference pixel coords (CRPIX1,CRPIX2) in FITS output """
self.crval1= x
self.crval2= y
def set_coord_system_type(self,x,y):
""" Set coord system type (CTYPE1,CTYPE2) in FITS output """
self.ctype1= x
self.ctype2= y
def set_gen_blob_img_size(self,nx,ny):
""" Set size of blob image used for compact source generation (must be odd) """
self.nx_gen= nx
self.ny_gen= ny
self.gridy_gen, self.gridx_gen = np.mgrid[0:self.ny_gen, 0:self.nx_gen]
def enable_compact_sources(self,choice):
""" Enable/disable compact source generation """
self.simulate_compact_sources= choice
def enable_extended_sources(self,choice):
""" Enable/disable extended source generation """
        self.simulate_ext_sources= choice
def enable_bkg(self,choice):
""" Enable/disable bkg generation """
self.simulate_bkg= choice
def set_npixels_min(self,value):
""" Set the minimum number of pixels for a generated source"""
self.npixels_min= value
def enable_model_truncation(self,choice):
""" Enable/disable continuous model truncation (gaussian, airy disk, ...) """
self.truncate_models= choice
def set_model_trunc_significance(self,value):
""" Set the significance level below which source model data are truncated """
self.trunc_model_zmin= value
def set_model_trunc_thr(self,value):
""" Set the flux percentage level for source model truncation """
self.trunc_thr= value
def set_ext_source_type(self,value):
""" Set the extended source type to be generated (-1=all, 1=ring, 2=ellipse, 3=bubble+shell, 4=airy)"""
self.ext_source_type= value
def set_ds9region_filename(self,filename):
""" Set the output DS9 region filename """
self.ds9filename= filename
def set_casaregion_filename(self,filename):
""" Set the output CASA region filename """
self.casafilename= filename
def set_map_filename(self,filename):
""" Set the output map filename """
self.mapfilename= filename
def set_model_filename(self,filename):
""" Set the output model filename """
self.modelfilename= filename
def set_source_filename(self,filename):
""" Set the output source ROOT filename """
self.outfilename= filename
def set_source_significance_range(self,zmin,zmax):
""" Set source significance range """
self.zmin= zmin
self.zmax= zmax
def set_ext_source_significance_range(self,zmin,zmax):
""" Set source significance range """
self.zmin_ext= zmin
self.zmax_ext= zmax
def set_nsources(self,n):
""" Set number of sources to be generated """
if n<0:
raise ValueError('Invalid number of sources specified (shall be >=0)')
self.nsources= n
def set_source_density(self,density):
""" Set compact source density in deg^-2 """
self.source_density= density
def set_source_flux_rand_model(self,model):
""" Set the source flux random model """
self.Smodel= model
def set_source_flux_rand_exp_slope(self,slope):
""" Set the source flux expo model slope par """
self.Sslope= slope
def set_source_flux_range(self,Smin,Smax):
""" Set source flux range """
self.Smin= Smin
self.Smax= Smax
def set_ext_source_flux_range(self,Smin,Smax):
""" Set source flux range """
self.Smin_ext= Smin
self.Smax_ext= Smax
def set_beam_bmaj_range(self,bmaj_min,bmaj_max):
""" Set beam bmaj range """
self.beam_bmaj_min= bmaj_min
self.beam_bmaj_max= bmaj_max
def set_beam_bmin_range(self,bmin_min,bmin_max):
""" Set beam bmin range """
self.beam_bmin_min= bmin_min
self.beam_bmin_max= bmin_max
def set_beam_pa_range(self,pa_min,pa_max):
""" Set beam pa range """
self.beam_bpa_min= pa_min
self.beam_bpa_max= pa_max
def set_ext_nsources(self,n):
""" Set number of extended sources to be generated """
if n<0:
raise ValueError('Invalid number of sources specified (shall be >=0)')
self.ext_nsources= n
def set_ext_source_density(self,density):
""" Set extended source density in deg^-2 """
self.ext_source_density= density
def set_ring_pars(self,rmin,rmax,wmin,wmax):
""" Set ring model parameters"""
self.ring_rmin= rmin
self.ring_rmax= rmax
self.ring_width_min= wmin
self.ring_width_max= wmax
def set_sersic_pars(self,radius,ell,index):
""" Set Sersic model pars"""
self.sersic_radius= radius
        self.sersic_ellipticity= ell
self.sersic_index= index
def set_disk_pars(self,rmin,rmax):
""" Set disk model parameters"""
self.disk_rmin= rmin
self.disk_rmax= rmax
def set_disk_shell_pars(self,ampl_ratio_min,ampl_ratio_max,radius_ratio_min,radius_ratio_max):
""" Set disk shell model parameters"""
self.shell_disk_ampl_ratio_min= ampl_ratio_min
self.shell_disk_ampl_ratio_max= ampl_ratio_max
self.shell_disk_radius_ratio_min= radius_ratio_min
self.shell_disk_radius_ratio_max= radius_ratio_max
def set_ellipse_pars(self,rmin,rmax):
""" Set ring model parameters"""
self.ellipse_rmin= rmin
self.ellipse_rmax= rmax
def set_bkg_pars(self,bkg_level,bkg_rms):
""" Set bkg parameters """
self.bkg_level= bkg_level
self.bkg_rms= bkg_rms
def set_beam_info(self,Bmaj,Bmin,Bpa):
""" Set beam info """
self.beam_bmaj= Bmaj
self.beam_bmin= Bmin
self.beam_bpa= Bpa
self.beam_area= self.compute_beam_area(Bmaj,Bmin)
def compute_beam_area(self,Bmaj,Bmin):
""" Compute beam area """
A= np.pi*Bmaj*Bmin/(4*np.log(2)) #2d gaussian area with FWHM=fx,fy (in arcsec^2)
pixelArea= np.fabs(self.pixsize*self.pixsize) # in arcsec^2
beam_area= A/pixelArea # in pixels
return beam_area
def compute_beam_sigma(self,fwhm):
""" """
sigma= fwhm/(2.*np.sqrt(2.*np.log(2.)))
return sigma
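    # Worked example (values for illustration only): with the default 6.5" x 6.5"
    # beam on a 1"/pixel grid,
    #   A = pi * Bmaj * Bmin / (4 ln 2) ~= 1.1331 * 6.5 * 6.5 ~= 47.9 arcsec^2,
    # i.e. a beam area of ~47.9 pixels, while the per-axis Gaussian width is
    #   sigma = FWHM / (2 sqrt(2 ln 2)) ~= 6.5 / 2.3548 ~= 2.76 arcsec.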
def check_random_state(self,seed):
""" Turn seed into a np.random.RandomState instance """
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState instance' % seed)
def make_gaus_noise_image(self,shape, mean=None, stddev=None,random_state=None):
""" Generate gaussian noise numpy array """
if mean is None:
raise ValueError('"mean" must be input')
if stddev is None:
raise ValueError('"stddev" must be input for Gaussian noise')
prng = self.check_random_state(random_state)
image = prng.normal(loc=mean, scale=stddev, size=shape)
return image
def generate_bkg(self):
""" Generate bkg data """
shape = (self.ny, self.nx)
#bkg_data = make_noise_image(shape, type='gaussian', mean=self.bkg_level, stddev=self.bkg_rms)
bkg_data = self.make_gaus_noise_image(shape, mean=self.bkg_level, stddev=self.bkg_rms)
return bkg_data
def generate_blob(self,ampl,x0,y0,sigmax,sigmay,theta,trunc_thr=0.01):
""" Generate a blob
Arguments:
ampl: peak flux in Jy
x0, y0: gaussian means in pixels
sigmax, sigmay: gaussian sigmas in pixels
theta: rotation in degrees
trunc_thr: truncation significance threshold
"""
#modelFcn= Gaussian2D(ampl,x0,y0,sigmax,sigmay,theta=math.radians(theta))
data= Gaussian2D(ampl,x0,y0,sigmax,sigmay,theta=math.radians(theta))(self.gridx, self.gridy)
## Truncate data such that sum(data)_trunc/sum(data)<f
f= trunc_thr
if self.truncate_models:
totFlux= (float)(np.sum(data,axis=None))
#print('Blob total flux=%s' % str(totFlux))
            data_vect_sorted= np.sort(np.ravel(data))
data_csum= np.cumsum(data_vect_sorted)/totFlux
fluxThr= data_vect_sorted[np.argmin(data_csum<f)]
#print('Blob fluxThr=%s' % str(fluxThr))
data[data<fluxThr] = 0
## Truncate data at minimum significance
#ampl_min= (trunc_thr*self.bkg_rms) + self.bkg_level
#if self.truncate_models:
# data[data<ampl_min] = 0
return data
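    # How the flux truncation above works (for reference): the pixel values are
    # sorted in increasing order and their cumulative sum is normalised by the
    # total flux; the value at which the cumulative fraction first reaches
    # trunc_thr becomes the cutoff, so zeroing every pixel below it removes at
    # most ~trunc_thr (1% by default) of the integrated source flux.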
def generate_blob_faster(self,ampl,x0,y0,sigmax,sigmay,theta,trunc_thr=0.01):
""" Generate a blob
Arguments:
ampl: peak flux in Jy
x0, y0: gaussian means in pixels
sigmax, sigmay: gaussian sigmas in pixels
theta: rotation in degrees
trunc_thr: truncation significance threshold
"""
data= Gaussian2D(ampl,x0,y0,sigmax,sigmay,theta=math.radians(theta))(self.gridx_gen, self.gridy_gen)
## Truncate data such that sum(data)_trunc/sum(data)<f
f= trunc_thr
if self.truncate_models:
totFlux= (float)(np.sum(data,axis=None))
            data_vect_sorted= np.sort(np.ravel(data))
data_csum= np.cumsum(data_vect_sorted)/totFlux
fluxThr= data_vect_sorted[np.argmin(data_csum<f)]
data[data<fluxThr] = 0
return data
def generate_ring(self,ampl,x0,y0,radius,width):
""" Generate a ring
Arguments:
ampl: peak flux in Jy
x0, y0: means in pixels
radius: ring radius in pixels
width: ring width in pixels
"""
data= Ring2D(ampl,x0,y0,radius,width)(self.gridx, self.gridy)
return data
def generate_ring_sector(self,ampl,x0,y0,radius,width,theta_min,theta_max):
""" Generate a ring
Arguments:
ampl: peak flux in Jy
x0, y0: means in pixels
radius: ring radius in pixels
width: ring width in pixels
theta_min, theta_max: sector theta min/max in degrees
"""
data= RingSector2D(ampl,x0,y0,radius,width,np.radians(theta_min),np.radians(theta_max))(self.gridx, self.gridy)
return data
def generate_bubble(self,ampl,x0,y0,radius,shell_ampl,shell_radius,shell_width,shell_theta_min,shell_theta_max):
""" Generate a bubble with a shell """
disk_data= Disk2D(ampl,x0,y0,radius)(self.gridx, self.gridy)
shell_data= self.generate_ring_sector(shell_ampl,x0,y0,shell_radius,shell_width,shell_theta_min,shell_theta_max)
data= disk_data + shell_data
return data
def generate_disk(self,ampl,x0,y0,radius):
""" Generate a disk """
data= Disk2D(ampl,x0,y0,radius)(self.gridx, self.gridy)
return data
def generate_ellipse(self,ampl,x0,y0,a,b,theta):
""" Generate ellipse """
data= Ellipse2D(ampl,x0,y0,a,b,math.radians(theta))(self.gridx, self.gridy)
return data
def generate_airy_disk(self,ampl,x0,y0,radius,trunc_thr=0.01):
""" Generate Airy disk """
data= AiryDisk2D(amplitude=ampl,x_0=x0,y_0=y0,radius=radius)(self.gridx, self.gridy)
totFlux= (float)(np.sum(data,axis=None))
## Truncate data such that sum(data)_trunc/sum(data)<f
f= trunc_thr
if self.truncate_models:
            data_vect_sorted= np.sort(np.ravel(data))
data_csum= np.cumsum(data_vect_sorted)/totFlux
fluxThr= data_vect_sorted[np.argmin(data_csum<f)]
data[data<fluxThr] = 0
## Truncate data at minimum significance
#ampl_min= (self.zmin_ext*self.bkg_rms) + self.bkg_level
#if self.truncate_models:
# data[data<ampl_min] = 0
return data
def generate_sersic(self,ampl,x0,y0,radius,ell,index,theta,trunc_thr=0.01):
""" Generate Sersic model """
data= Sersic2D(amplitude=ampl,x_0=x0,y_0=y0,r_eff=radius,n=index,ellip=ell,theta=math.radians(theta))(self.gridx, self.gridy)
totFlux= (float)(np.sum(data,axis=None))
## Truncate data such that sum(data)_trunc/sum(data)<f
f= trunc_thr
if self.truncate_models:
            data_vect_sorted= np.sort(np.ravel(data))
data_csum= np.cumsum(data_vect_sorted)/totFlux
fluxThr= data_vect_sorted[np.argmin(data_csum<f)]
data[data<fluxThr] = 0
## Truncate data at minimum significance
#ampl_min= (self.zmin_ext*self.bkg_rms) + self.bkg_level
#if self.truncate_models:
# data[data<ampl_min] = 0
return data
def make_caesar_source(self,source_data,source_name,source_id,source_type,source_sim_type,ampl=None,x0=None,y0=None,source_max_scale=None,offsetx=0,offsety=0):
""" Create Caesar source from source data array """
# Create Caesar source
source= Caesar.Source()
# Get source indexes and fill pixels in Caesar source
source_indexes= np.column_stack(np.where(source_data!=0))
nRows= (source_data.shape)[0]
nCols= (source_data.shape)[1]
for index in source_indexes:
rowId= index[0]
colId= index[1]
S= source_data[rowId,colId]
ix= colId
iy= rowId
#iy= nRows-1-rowId
gbin= ix + iy*nCols
pixel= Caesar.Pixel(gbin,ix,iy,ix+offsetx,iy+offsety,S)
source.AddPixel(pixel)
# Is at edge
if (ix==0) or (ix==nCols-1) or (iy==0) or (iy==nRows-1):
source.SetEdgeFlag(True)
# Retun None if npixels is too small
nPix= source.GetNPixels()
if nPix<self.npixels_min:
logger.info('Too few pixels (%s) for this source, return None!' % str(nPix))
return None
# If true info are not given compute them
# - S= count integral
# - baricenter of binary map
if x0 is None or y0 is None:
logger.info('No source true pos given, computing it from data...')
data_binary= np.where(source_data!=0,1,0)
[y0,x0]= ndimage.measurements.center_of_mass(data_binary)
x0+= offsetx
y0+= offsety
if ampl is None:
logger.info('No source true flux given, computing integral from data...')
ampl= np.sum(source_data,axis=None)
# Set some flags
source.SetName(source_name)
source.SetId(source_id)
source.SetType(source_type)
source.SetFlag(Caesar.eFake)
source.SetSimType(source_sim_type)
if source_max_scale is not None:
source.SetSimMaxScale(source_max_scale)
source.SetTrueInfo(ampl,x0,y0)
# Set flux correction factor
fluxCorrection= self.beam_area
source.SetBeamFluxIntegral(fluxCorrection)
# Compute stats & morph pars
source.ComputeStats();
source.ComputeMorphologyParams();
return source
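    # For reference, the Caesar source built above carries: the non-zero pixels of
    # the model cut-out (with the tile offset folded into the global pixel
    # coordinates), the true flux/position (computed from the data if not given),
    # the fake/sim-type flags, and the beam area used later as flux-correction
    # factor; sources with fewer than npixels_min pixels are discarded.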
def make_caesar_image(self,data):
""" Make Caesar image from array data """
# Get source indexes and fill pixels in Caesar source
img_indexes= np.column_stack(np.where(data!=0))
nRows= (data.shape)[0]
nCols= (data.shape)[1]
# Set metadata
metadata= Caesar.ImgMetaData()
metadata.Nx= self.nx
metadata.Ny= self.ny
metadata.Cx= (int)(self.crpix1)
metadata.Cy= (int)(self.crpix2)
metadata.Xc= self.crval1
metadata.Yc= self.crval2
metadata.dX= -self.pixsize/3600.
metadata.dY= self.pixsize/3600.
metadata.CoordTypeX= self.ctype1
metadata.CoordTypeY= self.ctype2
metadata.BUnit= 'JY/PIXEL'
metadata.Bmaj= self.beam_bmaj/3600.
metadata.Bmin= self.beam_bmin/3600.
metadata.Bpa= self.beam_bpa
# Create Caesar image
img= Caesar.Image(nCols,nRows,"img")
img.SetMetaData(metadata)
for index in img_indexes:
rowId= index[0]
colId= index[1]
S= data[rowId,colId]
ix= colId
iy= rowId
#iy= nRows-1-rowId
gbin= ix + iy*nCols
img.FillPixel(ix,iy,S,True);
return img
def generate_compact_sources(self):
""" Generate list of compact sources in the map.
- Uniform spatial distribution
- Uniform flux distribution
Arguments:
density: source density in #sources/deg^2 (e.g. 2000)
"""
# Compute number of sources to be generated given map area in pixels
#area= (self.nx*self.ny)*self.pixsize/(3600.*3600.) # in deg^2
dx_deg= (self.nx-2*self.marginx)*self.pixsize/3600.
dy_deg= (self.ny-2*self.marginy)*self.pixsize/3600.
#area= ((self.nx-2*self.marginx)*(self.ny-2*self.marginy))*self.pixsize/(3600.*3600.) # in deg^2
area= dx_deg*dy_deg
if self.nsources>0:
nsources= self.nsources
else: # density generator
nsources= int(round(self.source_density*area))
#S_min= (self.zmin*self.bkg_rms) + self.bkg_level
#S_max= (self.zmax*self.bkg_rms) + self.bkg_level
#lgS_min= np.log(S_min)
#lgS_max= np.log(S_max)
#randomize_flux= False
#if self.zmin<self.zmax:
# randomize_flux= True
S_min= self.Smin # Jy/pixel
S_max= self.Smax # Jy/pixel
lgS_min= np.log10(S_min)
lgS_max= np.log10(S_max)
randomize_flux= False
if self.Smin<self.Smax:
randomize_flux= True
## Set gaus pars generation
randomize_gaus= False
Bmaj_min= self.beam_bmaj_min
Bmaj_max= self.beam_bmaj_max
Bmin_min= self.beam_bmin_min
Bmin_max= self.beam_bmin_max
Pa_min= self.beam_bpa_min
Pa_max= self.beam_bpa_max
if self.beam_bmaj_min<self.beam_bmaj_max:
randomize_gaus= True
if self.beam_bmin_min<self.beam_bmin_max:
randomize_gaus= True
if self.beam_bpa_min<self.beam_bpa_max:
randomize_gaus= True
logger.info('Generating #%d compact sources in map...' % nsources)
# Compute blob sigma pars given beam info
sigmax= self.compute_beam_sigma(self.beam_bmaj)
sigmay= self.compute_beam_sigma(self.beam_bmin)
theta= self.beam_bpa + 90. # NB: BPA is the positional angle of the major axis measuring from North (up) counter clockwise, while theta is measured wrt to x axis
source_max_scale= 2*max(self.beam_bmaj,self.beam_bmin)
## Start generation loop
sources_data = Box2D(amplitude=0,x_0=0,y_0=0,x_width=2*self.nx, y_width=2*self.ny)(self.gridx, self.gridy)
mask_data = Box2D(amplitude=0,x_0=0,y_0=0,x_width=2*self.nx, y_width=2*self.ny)(self.gridx, self.gridy)
for index in range(0,nsources):
if index%100==0 :
logger.info("Generating compact source no. %s/%s ..." % (index+1,nsources))
## Generate random coordinates
#x0= np.random.uniform(0,self.nx)
#y0= np.random.uniform(0,self.ny)
#x0= np.random.uniform(0,self.nx-1)
#y0= np.random.uniform(0,self.ny-1)
x0= np.random.uniform(self.marginx,self.nx-self.marginx-1)
y0= np.random.uniform(self.marginy,self.ny-self.marginy-1)
ix= int(np.round(x0))
iy= int(np.round(y0))
## Compute amplitude given significance level and bkg
## Generate flux uniform in log
#if randomize_flux:
# lgS= np.random.uniform(lgS_min,lgS_max)
# S= np.exp(lgS)
# z= (S-self.bkg_level)/self.bkg_rms
#else:
# S= (self.zmin*self.bkg_rms) + self.bkg_level
# z= self.zmin
## Compute amplitude given significance level and bkg
## Generate flux uniform or expo in log
## Flux are in Jy/pixel
if randomize_flux:
if self.Smodel=='uniform':
lgS= np.random.uniform(lgS_min,lgS_max)
elif self.Smodel=='exp':
x= np.random.exponential(scale=1./self.Sslope)
lgS= x + lgS_min
if lgS>lgS_max:
continue
else:
lgS= np.random.uniform(lgS_min,lgS_max)
S= np.power(10,lgS)
else:
S= S_min
z= (S-self.bkg_level)/self.bkg_rms
## Generate gaus pars
if randomize_gaus:
bmin= random.uniform(Bmin_min,Bmin_max)
bmaj= random.uniform(bmin,Bmaj_max)
pa= random.uniform(Pa_min,Pa_max)
else:
bmin= self.beam_bmin_min
bmaj= self.beam_bmaj_min
pa= self.beam_bpa_min
sigmax= bmaj/(self.pixsize * SIGMA_TO_FWHM)
sigmay= bmin/(self.pixsize * SIGMA_TO_FWHM)
theta = 90 + pa # NB: BPA is the positional angle of the major axis measuring from North (up) counter clockwise, while theta is measured wrt to x axis
source_max_scale= 2*max(bmaj,bmin)
#print("bmaj=%f, bmin=%f, sigmax=%f, sigmay=%f" % (bmaj,bmin,sigmax,sigmay))
## Generate blob
t0 = time.time()
#blob_data= self.generate_blob(ampl=S,x0=x0,y0=y0,sigmax=sigmax/self.pixsize,sigmay=sigmay/self.pixsize,theta=theta,trunc_thr=self.trunc_thr)
x0_tile_gen= int(self.nx_gen/2.)
y0_tile_gen= int(self.ny_gen/2.)
#blob_data= self.generate_blob_faster(ampl=S,x0=x0_tile_gen,y0=y0_tile_gen,sigmax=sigmax/self.pixsize,sigmay=sigmay/self.pixsize,theta=theta,trunc_thr=self.trunc_thr)
blob_data= self.generate_blob_faster(ampl=S,x0=x0_tile_gen,y0=y0_tile_gen,sigmax=sigmax,sigmay=sigmay,theta=theta,trunc_thr=self.trunc_thr)
t1 = time.time()
elapsed_time = t1-t0
if blob_data is None:
logger.warn('Failed to generate blob (the truncation threshold may be too large), skipping this source...')
continue
logger.info('Generated blob no. %s in %s (s)' % (str(index),str(elapsed_time)) )
## - Add blob to source data
#sources_data+= blob_data
xmin_t, xmax_t= 0, self.nx_gen
ymin_t, ymax_t= 0, self.ny_gen
dx_t= int(self.nx_gen/2.)
dy_t= int(self.ny_gen/2.)
xmin, ymin = (ix - dx_t), (iy - dy_t)
xmax, ymax = (ix + dx_t + 1), (iy + dy_t +1)
if xmin<0 and xmax<0:
logger.warn('Tile outside mat along x, skip and regenerate!')
continue
if xmin>self.nx and xmax>self.nx:
logger.warn('Tile outside mat along x, skip and regenerate!')
continue
if ymin<0 and ymax<0:
logger.warn('Tile outside mat along y, skip and regenerate!')
continue
if ymin>self.ny and ymax>self.ny:
logger.warn('Tile outside mat along y, skip and regenerate!')
continue
if xmin<0:
xmin= 0
xmin_t= dx_t-ix
if ymin<0:
ymin= 0
ymin_t= dy_t-iy
if xmax>self.nx:
xmax= self.nx
xmax_t= self.nx - ix + dx_t
if ymax>self.ny:
ymax= self.ny
ymax_t= self.ny - iy + dy_t
# - Check if any generated source pixel is masked in the global mask (if provided)
if self.gmask_data is not None:
has_taken_pixels= np.any(self.gmask_data[ymin:ymax,xmin:xmax]>0)
if has_taken_pixels:
logger.info('Source pixels have already been taken in the provided global mask, regenerate...')
continue
#sources_data[xmin:xmax,ymin:ymax] += blob_data[xmin_t:xmax_t,ymin_t:ymax_t]
sources_data[ymin:ymax,xmin:xmax] += blob_data[ymin_t:ymax_t,xmin_t:xmax_t]
## Set model map
mask_data[iy,ix]+= S
# Make Caesar source
#offset_x= x0 - x0_tile_gen
#offset_y= y0 - y0_tile_gen
offset_x= ix - x0_tile_gen
offset_y= iy - y0_tile_gen
source_name= 'S' + str(index+1)
source_id= index+1
source_type= Caesar.ePointLike
self.ps_list.append([source_name,x0,y0,S])
t0 = time.time()
caesar_source= self.make_caesar_source(blob_data,source_name,source_id,source_type,Caesar.eBlobLike,ampl=S,x0=x0,y0=y0,source_max_scale=source_max_scale,offsetx=offset_x,offsety=offset_y)
t1 = time.time()
elapsed_time = t1-t0
if caesar_source is None:
logger.warn('Generated source has too few pixels, skip and regenerate...')
continue
logger.info('Make Caesar source %s from generated blob in %s (s)' % (source_name,str(elapsed_time)) )
self.caesar_sources.append(caesar_source)
logger.info('Source %s: Pos(%s,%s), ix=%s, iy=%s, S=%s' % (source_name,str(x0),str(y0),str(ix),str(iy),str(S)))
return [sources_data,mask_data]
def generate_extended_sources(self):
""" Generate list of extended sources in the map.
- Uniform spatial distribution
- Uniform flux distribution
Note:
The source density (in #sources/deg^2, e.g. 2000) is read from self.ext_source_density, unless self.ext_nsources>0, in which case exactly that number of sources is generated.
"""
# Compute number of sources to be generated given map area in pixels
dx_deg= (self.nx-2*self.marginx)*self.pixsize/3600.
dy_deg= (self.ny-2*self.marginy)*self.pixsize/3600.
#area= ((self.nx-2*self.marginx)*(self.ny-2*self.marginy))*self.pixsize/(3600.*3600.) # in deg^2
area= dx_deg*dy_deg
if self.ext_nsources>0:
nsources= self.ext_nsources
else:
nsources= int(round(self.ext_source_density*area))
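# (Added note, illustrative) e.g. a 1024x1024 pixel map with 1 arcsec pixels and no margins spans
# ~0.284 deg per side (area ~0.081 deg^2), so a density of 2000 sources/deg^2 gives ~162 sources.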
#S_min= (self.zmin_ext*self.bkg_rms) + self.bkg_level
#S_max= (self.zmax_ext*self.bkg_rms) + self.bkg_level
S_min= self.Smin_ext
S_max= self.Smax_ext
lgS_min= np.log10(S_min)
lgS_max= np.log10(S_max)
randomize_flux= False
#if self.zmin_ext<self.zmax_ext:
if S_min<S_max:
randomize_flux= True
logger.info('Generating #%d extended sources in map...' % nsources)
logger.debug('zmin_ext=%s, zmax_ext=%s, Smin=%s, Smax=%s' % (str(self.zmin_ext),str(self.zmax_ext),str(S_min),str(S_max)) )
## Start generation loop
sources_data = Box2D(amplitude=0,x_0=0,y_0=0,x_width=2*self.nx, y_width=2*self.ny)(self.gridx, self.gridy)
ngen_sources= 0
if self.ext_source_type==-1:
nsource_types= 6
else:
nsource_types= 1
#for index in range(0,nsources):
while (ngen_sources<nsources):
if ngen_sources%10==0 :
logger.info("Generating extended source no. %s/%s" % (ngen_sources+1,nsources))
## Generate random coordinates
#x0= random.uniform(0,self.nx)
#y0= random.uniform(0,self.ny)
#x0= np.random.uniform(0,self.nx-1)
#y0= np.random.uniform(0,self.ny-1)
x0= np.random.uniform(self.marginx,self.nx-self.marginx-1)
y0= np.random.uniform(self.marginy,self.ny-self.marginy-1)
## Compute amplitude given significance level and bkg
## Generate flux uniform in log
#if randomize_flux:
# lgS= np.random.uniform(lgS_min,lgS_max)
# S= np.exp(lgS)
# z= (S-self.bkg_level)/self.bkg_rms
#else:
# S= (self.zmin_ext*self.bkg_rms) + self.bkg_level
# z= self.zmin_ext
if randomize_flux:
if self.Smodel=='uniform':
lgS= np.random.uniform(lgS_min,lgS_max)
elif self.Smodel=='exp':
x= np.random.exponential(scale=1./self.Sslope)
lgS= x + lgS_min
if lgS>lgS_max:
continue
else:
lgS= np.random.uniform(lgS_min,lgS_max)
S= np.power(10,lgS)
else:
S= S_min
z= (S-self.bkg_level)/self.bkg_rms
## Generate random type (1=ring, 2=ellipse, ...)
if self.ext_source_type==-1:
source_sim_type= random.randint(1, nsource_types)
else:
source_sim_type= self.ext_source_type
source_max_scale= 0.
if source_sim_type==1: # Ring2D Sector model
source_sim_type= Caesar.eRingLike
ring_r= random.uniform(self.ring_rmin,self.ring_rmax)
ring_w= random.uniform(self.ring_width_min,self.ring_width_max)
#source_data= self.generate_ring(S,x0,y0,ring_r/self.pixsize,ring_w/self.pixsize) # convert radius/width from arcsec to pixels
theta1= random.uniform(-180,180)
theta2= random.uniform(-180,180)
theta_min= min(theta1,theta2)
theta_max= max(theta1,theta2)
dtheta= theta_max-theta_min
r= ring_r
R= ring_r + ring_w
sector_diagonal= np.sqrt( r*r + R*R - 2*r*R*np.cos(np.deg2rad(dtheta)) )
sector_arc= 2*R*np.pi*dtheta/360.
source_max_scale= max(max(sector_arc,ring_w),sector_diagonal)
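# (Added note) sector_diagonal is the law-of-cosines distance between the inner (r) and outer (R)
# sector corners separated by dtheta, sector_arc is the outer arc length 2*pi*R*(dtheta/360), and
# the largest of these (or the ring width) bounds the source scale.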
source_data= self.generate_ring_sector(S,x0,y0,ring_r/self.pixsize,ring_w/self.pixsize,theta_min,theta_max) # convert radius/width from arcsec to pixels
elif source_sim_type==2: # Ellipse 2D model
source_sim_type= Caesar.eEllipseLike
ellipse_bmaj= random.uniform(self.ellipse_rmin,self.ellipse_rmax)
#ellipse_bmin= random.uniform(self.ellipse_rmin,self.ellipse_rmax)
ellipse_bmin= random.uniform(max(self.ellipse_rratiomin*ellipse_bmaj,self.ellipse_rmin),self.ellipse_rmax)
ellipse_theta= random.uniform(0,360)
source_max_scale= max(ellipse_bmaj,ellipse_bmin)
source_data= self.generate_ellipse(S,x0,y0,ellipse_bmaj/self.pixsize,ellipse_bmin/self.pixsize,ellipse_theta) # convert radius/width from arcsec to pixels
elif source_sim_type==3: # bubble + shell model
source_sim_type= Caesar.eBubbleLike
bubble_r= random.uniform(self.disk_rmin,self.disk_rmax)
shell_excess= random.uniform(self.shell_disk_ampl_ratio_min,self.shell_disk_ampl_ratio_max)
shell_S= S*(1+shell_excess)
shell_r= random.uniform(bubble_r*self.shell_disk_radius_ratio_min,bubble_r*self.shell_disk_radius_ratio_max)
shell_width= random.uniform(0,bubble_r-shell_r)
theta1= random.uniform(-180,180)
theta2= random.uniform(-180,180)
theta_min= min(theta1,theta2)
theta_max= max(theta1,theta2)
source_max_scale= bubble_r*2
source_data= self.generate_bubble(S,x0,y0,bubble_r,shell_S,shell_r,shell_width,theta_min,theta_max)
#elif source_sim_type==4: # Airy disk
# source_sim_type= Caesar.eDiskLike
# disk_r= random.uniform(self.disk_rmin,self.disk_rmax)
# source_data= self.generate_airy_disk(S,x0,y0,disk_r)
elif source_sim_type==4: # Sersic
source_sim_type= Caesar.eDiskLike
sersic_r= random.uniform(self.disk_rmin,self.disk_rmax)
sersic_theta= random.uniform(0,360)
sersic_ell= random.uniform(0.7,1)
source_max_scale= 2*sersic_r
##source_data= self.generate_sersic(S,x0,y0,sersic_r,sersic_ell,self.sersic_index,sersic_theta)
source_data= self.generate_sersic(S,x0,y0,sersic_r,sersic_ell,self.sersic_index,sersic_theta,trunc_thr=self.trunc_thr)
elif source_sim_type==5: # Gaussian Blob like
source_sim_type= Caesar.eBlobLike
blob_bmaj= random.uniform(self.ellipse_rmin,self.ellipse_rmax)
#blob_bmin= random.uniform(self.ellipse_rmin,self.ellipse_rmax)
blob_bmin= random.uniform(max(self.ellipse_rratiomin*blob_bmaj,self.ellipse_rmin),blob_bmaj)
blob_theta= random.uniform(0,360)
source_max_scale= 2*max(blob_bmin,blob_bmaj)
#source_data= self.generate_blob(ampl=S,x0=x0,y0=y0,sigmax=blob_bmaj/self.pixsize,sigmay=blob_bmin/self.pixsize,theta=blob_theta,trunc_thr=self.zmin_ext)
source_data= self.generate_blob(ampl=S,x0=x0,y0=y0,sigmax=blob_bmaj/self.pixsize,sigmay=blob_bmin/self.pixsize,theta=blob_theta,trunc_thr=self.trunc_thr)
if source_data is None:
logger.warn('Failed to generate blob (the truncation threshold may be too large), skip and regenerate...')
continue
elif source_sim_type==6: # disk model
source_sim_type= Caesar.eDiskLike
disk_r= random.uniform(self.disk_rmin,self.disk_rmax)
source_max_scale= disk_r*2
source_data= self.generate_disk(S,x0,y0,disk_r)
else:
logger.warn('Invalid source type given!')
continue
## Check if source data contains all zeros (e.g. truncation removed all data)
if np.count_nonzero(source_data)<=0:
logger.warn('Generated extended source data contains all zeros, regenerate...')
continue
## Check if source pixels and its contour has been already taken before
source_indexes= (source_data!=0) # get all source data pixels (others are 0)
source_indexes_xright= (np.roll(source_data,1,axis=1)!=0)
source_indexes_xleft= (np.roll(source_data,-1,axis=1)!=0)
source_indexes_yright= (np.roll(source_data,1,axis=0)!=0)
source_indexes_yleft= (np.roll(source_data,-1,axis=0)!=0)
source_mask_indexes= (source_indexes | source_indexes_xright | source_indexes_xleft | source_indexes_yright | source_indexes_yleft)
#source_mask= np.where(source_data!=0,1,0)
taken_pixels= np.where(sources_data[source_mask_indexes]!=0) # get list of taken pixels in main mask corresponding to this source
has_taken_pixels= np.any(taken_pixels)
if has_taken_pixels:
logger.info('Source pixels have already been taken by a previously generated source, regenerate...')
continue
# - Check if pixels are taken in the global mask
if self.gmask_data is not None:
taken_pixels= np.where(self.gmask_data[source_mask_indexes]!=0) # get list of taken pixels in main mask corresponding to this source
has_taken_pixels= np.any(taken_pixels)
if has_taken_pixels:
logger.info('Source pixels have already been taken in the global mask, regenerate...')
continue
# Add to extended source data and mask
sources_data+= source_data
ngen_sources+= 1
# Set model map
ix= int(np.round(x0))
iy= int(np.round(y0))
# Make Caesar source
source_name= 'Sext' + str(ngen_sources)
source_id= ngen_sources
source_type= Caesar.eExtended
caesar_source= self.make_caesar_source(source_data,source_name,source_id,source_type,source_sim_type,None,None,None,source_max_scale)
if caesar_source is None:
logger.warn('Generated source has too few pixels, skip and regenerate...')
continue
self.caesar_sources.append(caesar_source)
logger.info('Ext Source %s: Pos(%s,%s), ix=%s, iy=%s, S=%s' % (source_name,str(x0),str(y0),str(ix),str(iy),str(S)))
return sources_data
#####################################
### GENERATE MAP ##
#####################################
def generate_map(self):
""" Generate sky map """
## == INITIALIZE DATA ==
logger.info('Initializing simulator data...')
self.init()
# - Check global mask data if given
if self.gmask_data is not None:
logger.info('Checking global mask data dimensions ...')
nx_g= self.gmask_data.shape[1]
ny_g= self.gmask_data.shape[0]
if nx_g!=self.nx:
logger.error("mask nx(%d)!=nx(%d)" % (nx_g,self.nx))
return -1
if ny_g!=self.ny:
logger.error("mask ny(%d)!=ny(%d)" % (ny_g,self.ny))
return -1
## == GENERATE EMPTY IMAGE ==
data = Box2D(amplitude=0,x_0=0,y_0=0,x_width=2*self.nx, y_width=2*self.ny)(self.gridx, self.gridy)
mask_data = Box2D(amplitude=0,x_0=0,y_0=0,x_width=2*self.nx, y_width=2*self.ny)(self.gridx, self.gridy)
## == GENERATE BKG ==
if self.simulate_bkg:
logger.info('Generating map bkg...')
bkg_data= self.generate_bkg()
data+= bkg_data
## == GENERATE COMPACT SOURCES ==
if self.simulate_compact_sources:
logger.info('Generating compact sources...')
[compact_source_data,compact_source_mask_data] = self.generate_compact_sources()
data+= compact_source_data
mask_data+= compact_source_mask_data
## == GENERATE EXTENDED SOURCES ==
if self.simulate_extended_sources:
logger.info('Generating extended sources...')
ext_source_data = self.generate_extended_sources()
data+= ext_source_data
mask_data+= ext_source_data
## == MAKE FINAL MAP ==
logger.info('Creating final map with bkg + sources added...')
## Sum data in cumulative map
#data= bkg_data + compact_source_data + ext_source_data
#mask_data= compact_source_mask_data + ext_source_data
## Add noise in skymodel
if self.simulate_bkg:
logger.info('Add noise to skymodel map ...')
mask_data+= bkg_data
## Cast data from float64 to float32
data_casted = data.astype(np.float32)
mask_data_casted = mask_data.astype(np.float32)
## Convert data from Jy/pixel to Jy/beam
## Jy/beam = Jy/pixel * beamArea(pixels)
scaleFactor= self.beam_area
data_casted*= scaleFactor
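# (Added note, illustrative; assumes the usual Gaussian beam area pi*bmaj*bmin/(4*ln2))
# e.g. a 9.8 x 9.8 arcsec beam on 1 arcsec pixels covers ~1.1331*9.8*9.8 ~ 109 pixels,
# so a pixel holding 1 Jy/pixel maps to ~109 Jy/beam after this scaling.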
## Create Caesar skymodel image from data (units= Jy/pixel)
logger.info('Creating Caesar image from data...')
##self.caesar_img= self.make_caesar_image(data_casted) # set toy sim map data
self.caesar_img= self.make_caesar_image(mask_data_casted) # set skymodel map data
## == WRITE MAPS TO FITS FILES ==
logger.info('Writing images to FITS...')
self.write_map(data_casted,self.mapfilename)
self.write_source_map(mask_data_casted,self.modelfilename)
## == WRITE IMG & SOURCES TO ROOT FILE ==
logger.info('Writing image & source collection to ROOT file...')
self.save()
## == WRITE DS9 REGION FILE ==
logger.info('Writing DS9 regions...')
self.write_ds9_regions()
## == WRITE ASCII FILE ==
logger.info('Writing point source parameters to ascii ...')
self.write_compact_source_par_list()
return 0
def write_compact_source_par_list(self):
""" Write cmpact source parameters to ascii file """
# - Open file
fout = open(self.source_par_outfile, 'wb')
#- Write header
header= ("# name x(pix) y(pix) S(Jy/pixel)")
fout.write(header)
fout.write('\n')
for i in range(len(self.ps_list)):
name= self.ps_list[i][0]
x= self.ps_list[i][1]
y= self.ps_list[i][2]
S= self.ps_list[i][3] # No need to convert peak flux
data= (("%s %s %s %s") % (name,x,y,S) )
fout.write(data)
fout.write('\n')
fout.flush()
fout.close()
def write_ds9_regions(self):
""" Write DS9 regions with sim sources """
## Open file
fout = open(self.ds9filename, 'wb')
## Write file header
fout.write('global color=white font=\"helvetica 8 normal\" edit=1 move=1 delete=1 include=1\n')
fout.write('image\n')
## Write source contour region
for item in self.caesar_sources:
regionInfo= item.GetDS9Region(True)
fout.write(regionInfo)
fout.write('\n')
fout.close();
def write_casa_mask(self,boxsize=10):
""" Write CASA mask file around simulated sources"""
## Create a WCS structure
w = wcs.WCS(naxis=2)
w.wcs.crpix = [self.crpix1, self.crpix2]
w.wcs.cdelt = np.array([-self.pixsize/3600., self.pixsize/3600.])
w.wcs.crval = [self.crval1, self.crval2]
w.wcs.ctype = [self.ctype1, self.ctype2]
#w.wcs.set_pv([(2, 1, 45.0)])
## Create mask ascii file with header
f = open(str(self.casafilename), 'wb')
f.write("#CRTFv0\n")
#f.write("global coord = J2000, color=blue\n")
## Create a CASA box around the source
for item in self.caesar_sources:
ix_min= item.GetIxMin()
ix_max= item.GetIxMax()
iy_min= item.GetIyMin()
iy_max= item.GetIyMax()
# Set box coordinates
pixcrd = np.array([[max(0,ix_min-boxsize/2.), max(0,iy_min-boxsize/2.)], [min(self.nx-1,ix_max+boxsize/2.), min(self.ny-1,iy_max+boxsize/2.)]], np.float_)
# Convert pixel coordinates to world coordinates
world = w.wcs_pix2world(pixcrd, 1)
print(world)
f.write("box [ [{0}deg,{1}deg], [{2}deg,{3}deg] ]\n".format(min(world[0,0],world[1,0]),min(world[0,1],world[1,1]),max(world[0,0],world[1,0]),max(world[0,1],world[1,1])))
# Close ascii file
f.close()
def draw_map(self,data):
""" Draw map data """
plt.imshow(data, origin='lower', cmap="hot")
pylab.show()
def write_map(self,data,outputfile):
""" Write FITS image with sim data """
# Define FITS header
header= fits.Header()
header.set('SIMPLE','T')
header.set('BITPIX','-32')
header.set('NAXIS1', str(self.nx))
header.set('NAXIS2', str(self.ny))
#header.set('NAXIS3', 1)
#header.set('NAXIS4', 1)
header.set('BUNIT', 'JY/BEAM')
header.set('BMAJ', self.beam_bmaj/3600.)
header.set('BMIN', self.beam_bmin/3600.)
header.set('BPA', self.beam_bpa)
header.set('BSCALE',1.)
header.set('BZERO',0.)
header.set('CDELT1',-self.pixsize/3600.)
header.set('CDELT2',self.pixsize/3600.)
header.set('CTYPE1',self.ctype1)
header.set('CTYPE2',self.ctype2)
header.set('CRPIX1',self.crpix1)
header.set('CRPIX2',self.crpix2)
header.set('CRVAL1',self.crval1)
header.set('CRVAL2',self.crval2)
# Define HDU
hdu = fits.PrimaryHDU(data=data,header=header)
hdulist = fits.HDUList([hdu])
hdulist.writeto(outputfile,overwrite=True)
def write_source_map(self,data,outputfile):
""" Write FITS image with sim mask data """
# Define FITS header
header= fits.Header()
header.set('SIMPLE','T')
header.set('BITPIX','-32')
header.set('NAXIS1', str(self.nx))
header.set('NAXIS2', str(self.ny))
header.set('BUNIT', 'JY/pixel')
header.set('BMAJ', self.beam_bmaj/3600.)
header.set('BMIN', self.beam_bmin/3600.)
header.set('BPA', self.beam_bpa)
header.set('BSCALE',1.)
header.set('BZERO',0.)
header.set('CDELT1',-self.pixsize/3600.)
header.set('CDELT2',self.pixsize/3600.)
header.set('CTYPE1',self.ctype1)
header.set('CTYPE2',self.ctype2)
header.set('CRPIX1',self.crpix1)
header.set('CRPIX2',self.crpix2)
header.set('CRVAL1',self.crval1)
header.set('CRVAL2',self.crval2)
# Define HDU
hdu = fits.PrimaryHDU(data=data,header=header)
hdulist = fits.HDUList([hdu])
hdulist.writeto(outputfile,overwrite=True)
def save(self):
""" Write img & source collection to ROOT file """
# Loop over sources
logger.info('Filling #%s sources to ROOT tree...' % str(len(self.caesar_sources)) )
for item in self.caesar_sources:
#self.cs= item
item.Copy(self.cs)
self.cs.Print()
self.outtree.Fill()
# Write to file
self.outfile.cd()
self.caesar_img.Write()
self.outtree.Write()
self.outfile.Close()
# Write CASA mask file
self.write_casa_mask(boxsize=self.mask_boxsize)
###########################
##############
## MAIN ##
##############
def main():
"""Main function"""
#===========================
#== Get script args
#===========================
logger.info('Get script args')
try:
args= get_args()
except Exception as ex:
logger.error("Failed to get and parse options (err=%s)",str(ex))
return 1
# - Image args
Nx= args.nx
Ny= args.ny
marginX= args.marginx
marginY= args.marginy
pixsize= args.pixsize
ctype1= args.ctype1
ctype2= args.ctype2
crpix1= args.crpix1
crpix2= args.crpix2
crval1= args.crval1
crval2= args.crval2
#- Source model
model_trunc_zmin= args.model_trunc_zmin
trunc_thr= args.trunc_thr
truncate_models= args.truncate_models
npixels_min= args.npixels_min
# - Bkg info args
enable_bkg= args.enable_bkg
bkg_level= args.bkg_level
bkg_rms= args.bkg_rms
# - Compact source args
enable_compactsources= args.enable_compactsources
nx_gen= args.nx_gen
ny_gen= args.ny_gen
Bmaj= args.bmaj
Bmin= args.bmin
Bpa= args.bpa
Zmin= args.zmin
Zmax= args.zmax
source_density= args.source_density
nsources= args.nsources
Smodel= args.Smodel
Sslope= args.Sslope
Smin= args.Smin
Smax= args.Smax
bmaj_min= args.bmaj_min
bmaj_max= args.bmaj_max
bmin_min= args.bmin_min
bmin_max= args.bmin_max
pa_min= args.pa_min
pa_max= args.pa_max
# - Extended source args
enable_extsources= args.enable_extsources
ext_source_type= args.ext_source_type
ext_nsources= args.ext_nsources
Smin_ext= args.Smin_ext
Smax_ext= args.Smax_ext
Zmin_ext= args.zmin_ext
Zmax_ext= args.zmax_ext
ext_source_density= args.ext_source_density
ext_scale_min= args.ext_scale_min
ext_scale_max= args.ext_scale_max
ring_rmin= args.ring_rmin
ring_rmax= args.ring_rmax
ring_wmin= args.ring_wmin
ring_wmax= args.ring_wmax
ellipse_rmin= args.ellipse_rmin
ellipse_rmax= args.ellipse_rmax
disk_shell_ampl_ratio_min= args.disk_shell_ampl_ratio_min
disk_shell_ampl_ratio_max= args.disk_shell_ampl_ratio_max
disk_shell_radius_ratio_min= args.disk_shell_radius_ratio_min
disk_shell_radius_ratio_max= args.disk_shell_radius_ratio_max
# - Output args
outputfile= args.outputfile
mask_outputfile= args.outputfile_model
outputfile_sources= args.outputfile_sources
outputfile_ds9region= args.outputfile_ds9region
outputfile_casaregion= args.outputfile_casaregion
mask_boxsize= args.mask_boxsize
# - Mask image
maskfile= args.maskimg
mask_data= None
if maskfile!='':
hdu= fits.open(maskfile)[0]
mask_data= hdu.data
print("*** ARGS ***")
print("Nx: %s" % Nx)
print("Ny: %s" % Ny)
print("Margin X: %s" % marginX)
print("Margin Y: %s" % marginY)
print("pixsize: %s" % pixsize)
print("ctype: (%s %s)" % (ctype1,ctype2))
print("crpix: (%s %s)" % (crpix1,crpix2))
print("crval: (%s %s)" % (crval1,crval2))
print("Beam (Bmaj/Bmin/Bpa): (%s,%s,%s)" % (Bmaj, Bmin, Bpa))
print("Enable bkg? %s" % str(enable_bkg) )
print("Bkg info (level,rms): (%s,%s)" % (bkg_level, bkg_rms))
print("Enable compact sources? %s" % str(enable_compactsources) )
print("Source significance range: (%s,%s)" % (Zmin, Zmax))
print("Source density (deg^-2): %s" % source_density)
print("Enable extended sources? %s" % str(enable_extsources) )
print("Extended source type %s" %str(ext_source_type) )
print("Extended source flux range: (%s,%s)" % (Smin_ext, Smax_ext))
print("Extended source significance range: (%s,%s)" % (Zmin_ext, Zmax_ext))
print("Extended source density (deg^-2): %s" % ext_source_density)
print("Extended source scale min/max: (%s,%s)" % (ext_scale_min, ext_scale_max))
print("Output filename: %s " % outputfile)
print("Model trunc thr: %s " % str(trunc_thr))
print("Mask output filename: %s " % mask_outputfile)
print("Mask box size: %s " % mask_boxsize)
print("************")
## Generate simulated sky map
print('INFO: Generating simulated sky map...')
simulator= SkyMapSimulator(Nx,Ny,pixsize)
simulator.set_margins(marginX,marginY)
simulator.set_ref_pix(crpix1,crpix2)
simulator.set_ref_pix_coords(crval1,crval2)
simulator.set_coord_system_type(ctype1,ctype2)
simulator.set_model_trunc_thr(trunc_thr)
simulator.set_model_trunc_significance(model_trunc_zmin)
simulator.enable_model_truncation(truncate_models)
simulator.set_npixels_min(npixels_min)
simulator.set_map_filename(outputfile)
simulator.set_model_filename(mask_outputfile)
simulator.set_source_filename(outputfile_sources)
simulator.set_ds9region_filename(outputfile_ds9region)
simulator.set_casaregion_filename(outputfile_casaregion)
simulator.enable_bkg(enable_bkg)
simulator.set_bkg_pars(bkg_level,bkg_rms)
simulator.set_beam_info(Bmaj,Bmin,Bpa)
simulator.enable_compact_sources(enable_compactsources)
simulator.set_gen_blob_img_size(nx_gen,ny_gen)
simulator.set_nsources(nsources)
simulator.set_source_flux_rand_model(Smodel)
simulator.set_source_flux_rand_exp_slope(Sslope)
simulator.set_source_flux_range(Smin,Smax)
simulator.set_source_significance_range(Zmin,Zmax)
simulator.set_source_density(source_density)
simulator.set_beam_bmaj_range(bmaj_min,bmaj_max)
simulator.set_beam_bmin_range(bmin_min,bmin_max)
simulator.set_beam_pa_range(pa_min,pa_max)
simulator.enable_extended_sources(enable_extsources)
simulator.set_ext_nsources(ext_nsources)
simulator.set_ext_source_type(ext_source_type)
simulator.set_ext_source_flux_range(Smin_ext,Smax_ext)
simulator.set_ext_source_significance_range(Zmin_ext,Zmax_ext)
simulator.set_ext_source_density(ext_source_density)
#simulator.set_ring_pars(ring_rmin,ring_rmax,ring_wmin,ring_wmax)
simulator.set_ring_pars(ext_scale_min,ext_scale_max,ring_wmin,ring_wmax)
#simulator.set_ellipse_pars(ellipse_rmin,ellipse_rmax)
simulator.set_ellipse_pars(ext_scale_min,ext_scale_max)
simulator.set_disk_pars(ext_scale_min,ext_scale_max)
simulator.set_disk_shell_pars(disk_shell_ampl_ratio_min,disk_shell_ampl_ratio_max,disk_shell_radius_ratio_min,disk_shell_radius_ratio_max)
simulator.set_mask_box_size(mask_boxsize)
if mask_data is not None:
simulator.gmask_data= mask_data
if simulator.generate_map()<0:
print("ERROR: generate map failed!")
return 1
return 0
###################
## MAIN EXEC ##
###################
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
aalmah/pylearn2 | pylearn2/train_extensions/plots.py | 34 | 9617 | """
Plot monitoring extensions while training.
"""
__authors__ = "Laurent Dinh"
__copyright__ = "Copyright 2014, Universite de Montreal"
__credits__ = ["Laurent Dinh"]
__license__ = "3-clause BSD"
__maintainer__ = "Laurent Dinh"
__email__ = "dinhlaur@iro"
import logging
import os
import os.path
import stat
import numpy
np = numpy
from pylearn2.train_extensions import TrainExtension
from theano.compat.six.moves import xrange
from pylearn2.utils import as_floatX, wraps
if os.getenv('DISPLAY') is None:
import matplotlib
matplotlib.use('Agg')
import matplotlib
import matplotlib.pyplot as plt
import warnings
log = logging.getLogger(__name__)
def make_readable(fn):
"""
Make a file readable by all.
Practical when the plot is in your public_html.
Parameters
----------
fn : str
Filename you wish to make public readable.
"""
st = os.stat(fn)
# Create the desired permission
st_mode = st.st_mode
read_all = stat.S_IRUSR
read_all |= stat.S_IRGRP
read_all |= stat.S_IROTH
# Set the permission
os.chmod(fn, st_mode | read_all)
def get_best_layout(n_plots):
"""
Find the best basic layout for a given number of plots.
Minimize the perimeter with a minimum area (``n_plots``) for
an integer rectangle.
Parameters
----------
n_plots : int
The number of plots to display
Returns
-------
n_rows : int
Number of rows in the layout
n_cols : int
Number of columns in the layout
"""
assert n_plots > 0
# Initialize the layout
n_rows = 1
n_cols = np.ceil(n_plots*1./n_rows)
n_cols = int(n_cols)
half_perimeter = n_cols + 1
# Limit the range of possible layouts
max_row = np.sqrt(n_plots)
max_row = np.round(max_row)
max_row = int(max_row)
for l in xrange(1, max_row + 1):
width = np.ceil(n_plots*1./l)
width = int(width)
if half_perimeter >= (width + l):
n_rows = l
n_cols = np.ceil(n_plots*1./n_rows)
n_cols = int(n_cols)
half_perimeter = n_rows + n_cols
return n_rows, n_cols
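# --- Added usage sketch (not part of the original module); the helper name and the
# sample plot counts are illustrative assumptions. ---
def _demo_best_layout():
    """Show the grid chosen for a few plot counts (e.g. 5 plots -> a 2 x 3 grid)."""
    for n_plots in (1, 5, 7, 12):
        n_rows, n_cols = get_best_layout(n_plots)
        # The layout always offers at least one cell per plot
        assert n_rows * n_cols >= n_plots
        log.info('%d plots -> %d x %d grid', n_plots, n_rows, n_cols)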
def create_colors(n_colors):
"""
Create an array of n_colors
Parameters
----------
n_colors : int
The number of colors to create
Returns
-------
colors_rgb : np.array
An array of shape (n_colors, 3) in RGB format
"""
# Create the list of color hue
colors_hue = np.arange(n_colors)
colors_hue = as_floatX(colors_hue)
colors_hue *= 1./n_colors
# Set the color in HSV format
colors_hsv = np.ones((n_colors, 3))
colors_hsv[:, 2] *= .75
colors_hsv[:, 0] = colors_hue
# Put in a matplotlib-friendly format
colors_hsv = colors_hsv.reshape((1, )+colors_hsv.shape)
# Convert to RGB
colors_rgb = matplotlib.colors.hsv_to_rgb(colors_hsv)
colors_rgb = colors_rgb[0]
return colors_rgb
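# --- Added usage sketch (not part of the original module); the helper name is an
# illustrative assumption. ---
def _demo_create_colors():
    """Check that create_colors returns one RGB row per requested color, all in [0, 1]."""
    colors = create_colors(4)
    assert colors.shape == (4, 3)
    assert 0. <= colors.min() and colors.max() <= 1.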
class Plotter(object):
"""
Base class for plotting.
Parameters
----------
freq : int, optional
The number of epochs between plot updates.
Default is None (set by the PlotManager).
"""
def __init__(self, freq=None):
self.filenames = []
self.freq = freq
def setup(self, model, dataset, algorithm):
"""
Setup the plotters.
Parameters
----------
model : pylearn2.models.Model
The model trained
dataset : pylearn2.datasets.Dataset
The dataset on which the model is trained
algorithm : pylearn2.training_algorithms.TrainingAlgorithm
The algorithm the model is trained with
"""
raise NotImplementedError(str(type(self))+" does not implement setup.")
def plot(self):
"""
The method that draws and saves the desired figure, which depends
on the object and its attributes. It is called by the
PlotManager object as often as the `freq` attribute defines.
"""
raise NotImplementedError(str(type(self))+" does not implement plot.")
def set_permissions(self, public):
"""
Make the produced files readable by everyone.
Parameters
----------
public : bool
If public is True, then the associated files are
readable by everyone.
"""
if public:
for filename in self.filenames:
make_readable(filename)
class Plots(Plotter):
"""
Plot different monitors.
Parameters
----------
channel_names : list of str
List of monitor channels to plot
save_path : str
Filename of the plot file
share : float, optional
The percentage of epochs shown. Default is .8 (80%)
per_second : bool, optional
Set if the x-axis is in seconds, in epochs otherwise.
Default is False.
kwargs : dict
Passed on to the superclass.
"""
def __init__(self, channel_names,
save_path, share=.8,
per_second=False,
** kwargs):
super(Plots, self).__init__(** kwargs)
if not save_path.endswith('.png'):
save_path += '.png'
self.save_path = save_path
self.filenames = [self.save_path]
self.channel_names = channel_names
self.n_colors = len(self.channel_names)
self.colors_rgb = create_colors(self.n_colors)
self.share = share
self.per_second = per_second
@wraps(Plotter.setup)
def setup(self, model, dataset, algorithm):
self.model = model
@wraps(Plotter.plot)
def plot(self):
monitor = self.model.monitor
channels = monitor.channels
channel_names = self.channel_names
# Accumulate the plots
plots = np.array(channels[channel_names[0]].val_record)
plots = plots.reshape((1, plots.shape[0]))
plots = plots.repeat(self.n_colors, axis=0)
for i, channel_name in enumerate(channel_names[1:]):
plots[i+1] = np.array(channels[channel_name].val_record)
# Keep the relevant part
n_min = plots.shape[1]
n_min -= int(np.ceil(plots.shape[1] * self.share))
plots = plots[:, n_min:]
# Get the x axis
x = np.arange(plots.shape[1])
x += n_min
# Put in seconds if needed
if self.per_second:
seconds = channels['training_seconds_this_epoch'].val_record
seconds = np.array(seconds)
seconds = seconds.cumsum()
x = seconds[x]
# Plot the quantities
plt.figure()
for i in xrange(self.n_colors):
plt.plot(x, plots[i], color=self.colors_rgb[i],
alpha=.5)
plt.legend(self.channel_names)
plt.xlim(x[0], x[-1])
plt.ylim(plots.min(), plots.max())
plt.axis('on')
plt.savefig(self.save_path)
plt.close()
class PlotManager(TrainExtension):
"""
Class to manage the Plotter classes.
Parameters
----------
plots : list of pylearn2.train_extensions.Plotter
List of plots to make during training
freq : int
The default number of epochs between plot updates.
public : bool
Whether the files are made public or not. Default is true.
html_path : str
The path where the HTML page is saved. The associated files should be
in the same folder. Default is None, in which case no HTML page is written.
"""
def __init__(self, plots, freq, public=True, html_path=None):
self.plots = plots
self.freq = freq
# Set a default freq
for plot in self.plots:
if plot.freq is None:
plot.freq = self.freq
self.public = public
self.html_path = html_path
self.filenames = []
self.count = 0
@wraps(TrainExtension.setup)
def setup(self, model, dataset, algorithm):
for plot in self.plots:
plot.setup(model, dataset, algorithm)
for filename in plot.filenames:
warn = ("/home/www-etud/" in filename)
warn |= (os.environ['HOME'] in filename)
warn &= ('umontreal' in os.environ['HOSTNAME'])
if warn:
warnings.warn('YOU MIGHT RUIN THE NFS '
'BY SAVING IN THIS PATH !')
self.filenames.append(filename)
if self.html_path is not None:
header = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<html xmlns="http://www.w3.org/1999/xhtml"'
'xml:lang="en">\n'
'\t<body>\n')
footer = ('\t</body>\n'
'</html>')
body = ''
for filename in self.filenames:
basename = os.path.basename(filename)
body += '<img src = "' + basename + '"><br/>\n'
with open(self.html_path, 'w') as f:
f.write(header + body + footer)
f.close()
if self.public:
make_readable(self.html_path)
@wraps(TrainExtension.on_monitor)
def on_monitor(self, model, dataset, algorithm):
self.count += 1
for plot in self.plots:
if self.count % plot.freq == 0:
try:
plot.plot()
plot.set_permissions(self.public)
except Exception as e:
warnings.warn(str(plot) + ' has failed.\n'
+ str(e))
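# --- Added usage sketch (not part of the original module). The channel names, file path
# and freq value are illustrative assumptions; any recorded monitor channels could be
# plotted, and the returned manager would typically be passed to the training loop as
# one of its extensions. ---
def _example_plot_manager():
    """Build a PlotManager that refreshes a two-channel plot every 5 epochs."""
    plots = Plots(channel_names=['train_objective', 'valid_objective'],
                  save_path='monitor_plots.png', share=.8)
    return PlotManager([plots], freq=5, public=False, html_path=None)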
| bsd-3-clause |
a-parhom/edx-platform | lms/djangoapps/course_api/blocks/tests/test_api.py | 2 | 11195 | """
Tests for Blocks api.py
"""
from itertools import product
from mock import patch
import ddt
from django.test.client import RequestFactory
from django.test.utils import override_settings
import course_blocks.api as course_blocks_api
from openedx.core.djangoapps.content.block_structure.api import clear_course_from_cache
from openedx.core.djangoapps.content.block_structure.config import STORAGE_BACKING_FOR_CACHE, waffle
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory, check_mongo_calls
from xmodule.modulestore.tests.sample_courses import BlockInfo
from ..api import get_blocks
class TestGetBlocks(SharedModuleStoreTestCase):
"""
Tests for the get_blocks function
"""
shard = 4
@classmethod
def setUpClass(cls):
super(TestGetBlocks, cls).setUpClass()
with cls.store.default_store(ModuleStoreEnum.Type.split):
cls.course = SampleCourseFactory.create()
# hide the html block
cls.html_block = cls.store.get_item(cls.course.id.make_usage_key('html', 'html_x1a_1'))
cls.html_block.visible_to_staff_only = True
cls.store.update_item(cls.html_block, ModuleStoreEnum.UserID.test)
def setUp(self):
super(TestGetBlocks, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_basic(self):
blocks = get_blocks(self.request, self.course.location, self.user)
self.assertEquals(blocks['root'], unicode(self.course.location))
# subtract for (1) the orphaned course About block and (2) the hidden Html block
self.assertEquals(len(blocks['blocks']), len(self.store.get_items(self.course.id)) - 2)
self.assertNotIn(unicode(self.html_block.location), blocks['blocks'])
def test_no_user(self):
blocks = get_blocks(self.request, self.course.location)
self.assertIn(unicode(self.html_block.location), blocks['blocks'])
def test_access_before_api_transformer_order(self):
"""
Tests the order of transformers: access checks are made before the api
transformer is applied.
"""
blocks = get_blocks(self.request, self.course.location, self.user, nav_depth=5, requested_fields=['nav_depth'])
vertical_block = self.store.get_item(self.course.id.make_usage_key('vertical', 'vertical_x1a'))
problem_block = self.store.get_item(self.course.id.make_usage_key('problem', 'problem_x1a_1'))
vertical_descendants = blocks['blocks'][unicode(vertical_block.location)]['descendants']
self.assertIn(unicode(problem_block.location), vertical_descendants)
self.assertNotIn(unicode(self.html_block.location), vertical_descendants)
def test_sub_structure(self):
sequential_block = self.store.get_item(self.course.id.make_usage_key('sequential', 'sequential_y1'))
blocks = get_blocks(self.request, sequential_block.location, self.user)
self.assertEquals(blocks['root'], unicode(sequential_block.location))
self.assertEquals(len(blocks['blocks']), 5)
for block_type, block_name, is_inside_of_structure in (
('vertical', 'vertical_y1a', True),
('problem', 'problem_y1a_1', True),
('chapter', 'chapter_y', False),
('sequential', 'sequential_x1', False),
):
block = self.store.get_item(self.course.id.make_usage_key(block_type, block_name))
if is_inside_of_structure:
self.assertIn(unicode(block.location), blocks['blocks'])
else:
self.assertNotIn(unicode(block.location), blocks['blocks'])
def test_filtering_by_block_types(self):
sequential_block = self.store.get_item(self.course.id.make_usage_key('sequential', 'sequential_y1'))
# not filtered blocks
blocks = get_blocks(self.request, sequential_block.location, self.user, requested_fields=['type'])
self.assertEquals(len(blocks['blocks']), 5)
found_not_problem = False
for block in blocks['blocks'].itervalues():
if block['type'] != 'problem':
found_not_problem = True
self.assertTrue(found_not_problem)
# filtered blocks
blocks = get_blocks(self.request, sequential_block.location, self.user,
block_types_filter=['problem'], requested_fields=['type'])
self.assertEquals(len(blocks['blocks']), 3)
for block in blocks['blocks'].itervalues():
self.assertEqual(block['type'], 'problem')
# TODO: Remove this class after REVE-52 lands and old-mobile-app traffic falls to < 5% of mobile traffic
@ddt.ddt
class TestGetBlocksMobileHack(SharedModuleStoreTestCase):
"""
Tests that requests from the mobile app don't receive empty containers.
"""
shard = 4
@classmethod
def setUpClass(cls):
super(TestGetBlocksMobileHack, cls).setUpClass()
with cls.store.default_store(ModuleStoreEnum.Type.split):
cls.course = SampleCourseFactory.create(
block_info_tree=[
BlockInfo('empty_chapter', 'chapter', {}, [
BlockInfo('empty_sequential', 'sequential', {}, [
BlockInfo('empty_vertical', 'vertical', {}, []),
]),
]),
BlockInfo('full_chapter', 'chapter', {}, [
BlockInfo('full_sequential', 'sequential', {}, [
BlockInfo('full_vertical', 'vertical', {}, [
BlockInfo('html', 'html', {}, []),
]),
]),
])
]
)
def setUp(self):
super(TestGetBlocksMobileHack, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
@ddt.data(
*product([True, False], ['chapter', 'sequential', 'vertical'])
)
@ddt.unpack
def test_empty_containers(self, is_mobile, container_type):
with patch('lms.djangoapps.course_api.blocks.api.is_request_from_mobile_app', return_value=is_mobile):
blocks = get_blocks(self.request, self.course.location)
full_container_key = self.course.id.make_usage_key(container_type, 'full_{}'.format(container_type))
self.assertIn(str(full_container_key), blocks['blocks'])
empty_container_key = self.course.id.make_usage_key(container_type, 'empty_{}'.format(container_type))
assert_containment = self.assertNotIn if is_mobile else self.assertIn
assert_containment(str(empty_container_key), blocks['blocks'])
@ddt.ddt
class TestGetBlocksQueryCountsBase(SharedModuleStoreTestCase):
"""
Base for the get_blocks tests.
"""
shard = 4
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super(TestGetBlocksQueryCountsBase, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def _create_course(self, store_type):
"""
Creates the sample course in the given store type.
"""
with self.store.default_store(store_type):
return SampleCourseFactory.create()
def _get_blocks(self, course, expected_mongo_queries, expected_sql_queries):
"""
Verifies the number of expected queries when calling
get_blocks on the given course.
"""
with check_mongo_calls(expected_mongo_queries):
with self.assertNumQueries(expected_sql_queries):
get_blocks(self.request, course.location, self.user)
@ddt.ddt
class TestGetBlocksQueryCounts(TestGetBlocksQueryCountsBase):
"""
Tests query counts for the get_blocks function.
"""
shard = 4
@ddt.data(
*product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(True, False),
)
)
@ddt.unpack
def test_query_counts_cached(self, store_type, with_storage_backing):
with waffle().override(STORAGE_BACKING_FOR_CACHE, active=with_storage_backing):
course = self._create_course(store_type)
self._get_blocks(
course,
expected_mongo_queries=0,
expected_sql_queries=9 if with_storage_backing else 8,
)
@ddt.data(
*product(
((ModuleStoreEnum.Type.mongo, 5), (ModuleStoreEnum.Type.split, 3)),
(True, False),
)
)
@ddt.unpack
def test_query_counts_uncached(self, store_type_tuple, with_storage_backing):
store_type, expected_mongo_queries = store_type_tuple
with waffle().override(STORAGE_BACKING_FOR_CACHE, active=with_storage_backing):
course = self._create_course(store_type)
clear_course_from_cache(course.id)
if with_storage_backing:
num_sql_queries = 19
else:
num_sql_queries = 9
self._get_blocks(
course,
expected_mongo_queries,
expected_sql_queries=num_sql_queries,
)
@ddt.ddt
@override_settings(FIELD_OVERRIDE_PROVIDERS=(course_blocks_api.INDIVIDUAL_STUDENT_OVERRIDE_PROVIDER, ))
class TestQueryCountsWithIndividualOverrideProvider(TestGetBlocksQueryCountsBase):
"""
Tests query counts for the get_blocks function when IndividualStudentOverrideProvider is set.
"""
shard = 4
@ddt.data(
*product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(True, False),
)
)
@ddt.unpack
def test_query_counts_cached(self, store_type, with_storage_backing):
with waffle().override(STORAGE_BACKING_FOR_CACHE, active=with_storage_backing):
course = self._create_course(store_type)
self._get_blocks(
course,
expected_mongo_queries=0,
expected_sql_queries=10 if with_storage_backing else 9,
)
@ddt.data(
*product(
((ModuleStoreEnum.Type.mongo, 5), (ModuleStoreEnum.Type.split, 3)),
(True, False),
)
)
@ddt.unpack
def test_query_counts_uncached(self, store_type_tuple, with_storage_backing):
store_type, expected_mongo_queries = store_type_tuple
with waffle().override(STORAGE_BACKING_FOR_CACHE, active=with_storage_backing):
course = self._create_course(store_type)
clear_course_from_cache(course.id)
if with_storage_backing:
num_sql_queries = 20
else:
num_sql_queries = 10
self._get_blocks(
course,
expected_mongo_queries,
expected_sql_queries=num_sql_queries,
)
| agpl-3.0 |
roxyboy/scikit-learn | examples/cluster/plot_segmentation_toy.py | 257 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient, so that the segmentation is close to a Voronoi
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
aalmah/pylearn2 | pylearn2/utils/datasets.py | 44 | 9068 | """
Several utilities to evaluate an ALC on the dataset, to iterate over
minibatches from a dataset, or to merge three data with given proportions
"""
# Standard library imports
import logging
import os
import functools
from itertools import repeat
import warnings
# Third-party imports
import numpy
import scipy
from theano.compat.six.moves import reduce, xrange
import theano
try:
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
warnings.warn("Could not import some dependencies.")
# Local imports
from pylearn2.utils.rng import make_np_rng
logger = logging.getLogger(__name__)
##################################################
# 3D Visualization
##################################################
def do_3d_scatter(x, y, z, figno=None, title=None):
"""
Generate a 3D scatterplot figure and optionally give it a title.
Parameters
----------
x : WRITEME
y : WRITEME
z : WRITEME
figno : WRITEME
title : WRITEME
"""
fig = pyplot.figure(figno)
ax = Axes3D(fig)
ax.scatter(x, y, z)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
pyplot.suptitle(title)
def save_plot(repr, path, name="figure.pdf", title="features"):
"""
.. todo::
WRITEME
"""
# TODO : Maybe run a PCA if shape[1] > 3
assert repr.get_value(borrow=True).shape[1] == 3
# Take the first 3 columns
x, y, z = repr.get_value(borrow=True).T
do_3d_scatter(x, y, z)
# Save the produces figure
filename = os.path.join(path, name)
pyplot.savefig(filename, format="pdf")
logger.info('... figure saved: {0}'.format(filename))
##################################################
# Features or examples filtering
##################################################
def filter_labels(train, label, classes=None):
"""
Filter examples of train for which we have labels
Parameters
----------
train : WRITEME
label : WRITEME
classes : WRITEME
Returns
-------
WRITEME
"""
if isinstance(train, theano.tensor.sharedvar.SharedVariable):
train = train.get_value(borrow=True)
if isinstance(label, theano.tensor.sharedvar.SharedVariable):
label = label.get_value(borrow=True)
if not (isinstance(train, numpy.ndarray) or scipy.sparse.issparse(train)):
raise TypeError('train must be a numpy array, a scipy sparse matrix,'
' or a theano shared array')
# Examples for which any label is set
if classes is not None:
label = label[:, classes]
# Special case for sparse matrices
if scipy.sparse.issparse(train):
idx = label.sum(axis=1).nonzero()[0]
return (train[idx], label[idx])
# Compress train and label arrays according to condition
condition = label.any(axis=1)
return tuple(var.compress(condition, axis=0) for var in (train, label))
def nonzero_features(data, combine=None):
"""
Get features for which there are nonzero entries in the data.
Parameters
----------
data : list of matrices
List of data matrices, either in sparse format or not.
They must have the same number of features (column number).
combine : function, optional
A function to combine elementwise which features to keep.
Default keeps the intersection of each non-zero columns.
Returns
-------
indices : ndarray object
Indices of the nonzero features.
Notes
-----
I would return a mask (bool array) here, but scipy.sparse doesn't appear to
fully support advanced indexing.
"""
if combine is None:
combine = functools.partial(reduce, numpy.logical_and)
# Assumes all values are >0, which is the case for all sparse datasets.
masks = numpy.asarray([subset.sum(axis=0) for subset in data]).squeeze()
nz_feats = combine(masks).nonzero()[0]
return nz_feats
# TODO: Is this a duplicate?
def filter_nonzero(data, combine=None):
"""
Filter non-zero features of data according to a certain combining function
Parameters
----------
data : list of matrices
List of data matrices, either in sparse format or not.
They must have the same number of features (column number).
combine : function
A function to combine elementwise which features to keep.
Default keeps the intersection of each non-zero columns.
Returns
-------
indices : ndarray object
Indices of the nonzero features.
"""
nz_feats = nonzero_features(data, combine)
return [set[:, nz_feats] for set in data]
##################################################
# Iterator object for minibatches of datasets
##################################################
class BatchIterator(object):
"""
Builds an iterator object that can be used to go through the minibatches
of a dataset, with respect to the given proportions in conf
Parameters
----------
dataset : WRITEME
set_proba : WRITEME
batch_size : WRITEME
seed : WRITEME
"""
def __init__(self, dataset, set_proba, batch_size, seed=300):
# Local shortcuts for array operations
flo = numpy.floor
sub = numpy.subtract
mul = numpy.multiply
div = numpy.divide
mod = numpy.mod
# Record external parameters
self.batch_size = batch_size
if (isinstance(dataset[0], theano.Variable)):
self.dataset = [set.get_value(borrow=True) for set in dataset]
else:
self.dataset = dataset
# Compute maximum number of samples for one loop
set_sizes = [set.shape[0] for set in self.dataset]
set_batch = [float(self.batch_size) for i in xrange(3)]
set_range = div(mul(set_proba, set_sizes), set_batch)
set_range = map(int, numpy.ceil(set_range))
# Upper bounds for each minibatch indexes
set_limit = numpy.ceil(numpy.divide(set_sizes, set_batch))
self.limit = map(int, set_limit)
# Number of rows in the resulting union
set_tsign = sub(set_limit, flo(div(set_sizes, set_batch)))
set_tsize = mul(set_tsign, flo(div(set_range, set_limit)))
l_trun = mul(flo(div(set_range, set_limit)), mod(set_sizes, set_batch))
l_full = mul(sub(set_range, set_tsize), set_batch)
self.length = sum(l_full) + sum(l_trun)
# Random number generation using a permutation
index_tab = []
for i in xrange(3):
index_tab.extend(repeat(i, set_range[i]))
# Use a deterministic seed
self.seed = seed
rng = make_np_rng(seed, which_method="permutation")
self.permut = rng.permutation(index_tab)
def __iter__(self):
"""Generator function to iterate through all minibatches"""
counter = [0, 0, 0]
for chosen in self.permut:
# Retrieve minibatch from chosen set
index = counter[chosen]
minibatch = self.dataset[chosen][
index * self.batch_size:(index + 1) * self.batch_size
]
# Increment the related counter
counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
# Return the computed minibatch
yield minibatch
def __len__(self):
"""Return length of the weighted union"""
return self.length
def by_index(self):
"""Same generator as __iter__, but yield only the chosen indexes"""
counter = [0, 0, 0]
for chosen in self.permut:
index = counter[chosen]
counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
yield chosen, index
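# --- Added usage sketch (not part of the original module); the split sizes, proportions
# and batch size are illustrative assumptions. ---
def _demo_batch_iterator():
    """Iterate over minibatches drawn from three example subsets."""
    rng = numpy.random.RandomState(0)
    subsets = [rng.randn(size, 5) for size in (80, 40, 20)]
    iterator = BatchIterator(subsets, set_proba=[1., .5, .5], batch_size=10)
    for minibatch in iterator:
        # Every minibatch keeps the feature dimension of the original subsets
        assert minibatch.shape[1] == 5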
##################################################
# Miscellaneous
##################################################
def minibatch_map(fn, batch_size, input_data, output_data=None,
output_width=None):
"""
Apply a function on input_data, one minibatch at a time.
Storage for the output can be provided. If it is the case,
it should have appropriate size.
If output_data is not provided, then output_width should be specified.
Parameters
----------
fn : WRITEME
batch_size : WRITEME
input_data : WRITEME
output_data : WRITEME
output_width : WRITEME
Returns
-------
WRITEME
"""
if output_width is None:
if output_data is None:
raise ValueError('output_data or output_width should be provided')
output_width = output_data.shape[1]
output_length = input_data.shape[0]
if output_data is None:
output_data = numpy.empty((output_length, output_width))
else:
assert output_data.shape[0] == input_data.shape[0], ('output_data '
'should have the same length as input_data',
output_data.shape[0], input_data.shape[0])
for i in xrange(0, output_length, batch_size):
output_data[i:i+batch_size] = fn(input_data[i:i+batch_size])
return output_data
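# --- Added usage sketch (not part of the original module); the chunk size and toy data
# are illustrative assumptions. ---
def _demo_minibatch_map():
    """Apply a function to a 25-row array in chunks of 10 rows."""
    inputs = numpy.arange(25.).reshape(25, 1)
    doubled = minibatch_map(lambda x: 2 * x, 10, inputs, output_width=1)
    assert numpy.allclose(doubled, 2 * inputs)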
| bsd-3-clause |
gibiansky/tensorflow | tensorflow/examples/learn/iris_with_pipeline.py | 13 | 1854 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
iris = load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# It's useful to scale to ensure Stochastic Gradient Descent
# will do the right thing.
scaler = StandardScaler()
# DNN classifier.
classifier = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10], n_classes=3)
pipeline = Pipeline([('scaler', scaler),
('DNNclassifier', classifier)])
pipeline.fit(x_train, y_train, DNNclassifier__steps=200)
score = accuracy_score(y_test, list(pipeline.predict(x_test)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
roxyboy/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 294 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of input features between fit and
    # partial_fit raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
ephes/scikit-learn | sklearn/externals/joblib/__init__.py | 85 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computationally heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
   good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
   parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
   display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
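   A minimal sketch of this (hypothetical file path; the calls are marked to
   be skipped if these doctests are executed)::

      >>> from sklearn.externals.joblib import dump, load  # doctest: +SKIP
      >>> dump(np.arange(10), '/tmp/joblib_demo.pkl')  # doctest: +SKIP
      >>> arr = load('/tmp/joblib_demo.pkl')  # doctest: +SKIP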
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
edx/edx-platform | openedx/features/course_experience/tests/views/test_course_outline.py | 2 | 34442 | """
Tests for the Course Outline view and supporting views.
"""
import datetime
import re
from unittest.mock import Mock, patch
import ddt
from completion.waffle import ENABLE_COMPLETION_TRACKING_SWITCH
from completion.models import BlockCompletion
from completion.test_utils import CompletionWaffleTestMixin
from django.contrib.sites.models import Site
from django.test import RequestFactory, override_settings
from django.urls import reverse
from django.utils import timezone
from edx_toggles.toggles.testutils import override_waffle_flag, override_waffle_switch
from milestones.tests.utils import MilestonesTestCaseMixin
from opaque_keys.edx.keys import CourseKey, UsageKey
from pyquery import PyQuery as pq
from pytz import UTC
from waffle.models import Switch
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.tests.factories import StaffFactory
from lms.djangoapps.course_api.blocks.transformers.milestones import MilestonesAndSpecialExamsTransformer
from lms.djangoapps.gating import api as lms_gating_api
from lms.djangoapps.courseware.tests.helpers import MasqueradeMixin
from lms.urls import RESET_COURSE_DEADLINES_NAME
from openedx.core.djangoapps.course_date_signals.models import SelfPacedRelativeDatesConfig
from openedx.core.djangoapps.schedules.models import Schedule
from openedx.core.djangoapps.schedules.tests.factories import ScheduleFactory # pylint: disable=unused-import
from openedx.core.lib.gating import api as gating_api
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
from openedx.features.course_experience import RELATIVE_DATES_FLAG
from openedx.features.course_experience.views.course_outline import (
DEFAULT_COMPLETION_TRACKING_START,
CourseOutlineFragmentView
)
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory # lint-amnesty, pylint: disable=wrong-import-order
from ...utils import get_course_outline_block_tree
from .test_course_home import course_home_url
TEST_PASSWORD = 'test'
GATING_NAMESPACE_QUALIFIER = '.gating'
@ddt.ddt
class TestCourseOutlinePage(SharedModuleStoreTestCase, MasqueradeMixin):
"""
Test the course outline view.
"""
ENABLED_SIGNALS = ['course_published']
@classmethod
def setUpClass(cls): # lint-amnesty, pylint: disable=super-method-not-called
"""
Set up an array of various courses to be tested.
"""
SelfPacedRelativeDatesConfig.objects.create(enabled=True)
# setUpClassAndTestData() already calls setUpClass on SharedModuleStoreTestCase
# pylint: disable=super-method-not-called
with super().setUpClassAndTestData():
cls.courses = []
course = CourseFactory.create(self_paced=True)
with cls.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location, graded=True, format="Homework") # lint-amnesty, pylint: disable=line-too-long
vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
problem = ItemFactory.create(category='problem', parent_location=vertical.location)
course.children = [chapter]
chapter.children = [sequential]
sequential.children = [vertical]
vertical.children = [problem]
cls.courses.append(course)
course = CourseFactory.create()
with cls.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
sequential2 = ItemFactory.create(category='sequential', parent_location=chapter.location)
vertical = ItemFactory.create(
category='vertical',
parent_location=sequential.location,
display_name="Vertical 1"
)
vertical2 = ItemFactory.create(
category='vertical',
parent_location=sequential2.location,
display_name="Vertical 2"
)
course.children = [chapter]
chapter.children = [sequential, sequential2]
sequential.children = [vertical]
sequential2.children = [vertical2]
cls.courses.append(course)
course = CourseFactory.create()
with cls.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(
category='sequential',
parent_location=chapter.location,
due=datetime.datetime.now(),
graded=True,
format='Homework',
)
vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
course.children = [chapter]
chapter.children = [sequential]
sequential.children = [vertical]
cls.courses.append(course)
@classmethod
def setUpTestData(cls): # lint-amnesty, pylint: disable=super-method-not-called
"""Set up and enroll our fake user in the course."""
cls.user = UserFactory(password=TEST_PASSWORD)
for course in cls.courses:
CourseEnrollment.enroll(cls.user, course.id)
Schedule.objects.update(start_date=timezone.now() - datetime.timedelta(days=1))
def setUp(self):
"""
Set up for the tests.
"""
super().setUp()
self.client.login(username=self.user.username, password=TEST_PASSWORD)
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_outline_details(self):
for course in self.courses:
url = course_home_url(course)
request_factory = RequestFactory()
request = request_factory.get(url)
request.user = self.user
course_block_tree = get_course_outline_block_tree(
request, str(course.id), self.user
)
response = self.client.get(url)
assert course.children
for chapter in course_block_tree['children']:
self.assertContains(response, chapter['display_name'])
assert chapter['children']
for sequential in chapter['children']:
self.assertContains(response, sequential['display_name'])
if sequential['graded']:
print(sequential)
self.assertContains(response, sequential['due'].strftime('%Y-%m-%d %H:%M:%S'))
self.assertContains(response, sequential['format'])
assert sequential['children']
def test_num_graded_problems(self):
course = CourseFactory.create()
with self.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
problem = ItemFactory.create(category='problem', parent_location=sequential.location)
sequential2 = ItemFactory.create(category='sequential', parent_location=chapter.location)
problem2 = ItemFactory.create(category='problem', graded=True, has_score=True,
parent_location=sequential2.location)
sequential3 = ItemFactory.create(category='sequential', parent_location=chapter.location)
problem3_1 = ItemFactory.create(category='problem', graded=True, has_score=True,
parent_location=sequential3.location)
problem3_2 = ItemFactory.create(category='problem', graded=True, has_score=True,
parent_location=sequential3.location)
course.children = [chapter]
chapter.children = [sequential, sequential2, sequential3]
sequential.children = [problem]
sequential2.children = [problem2]
sequential3.children = [problem3_1, problem3_2]
CourseEnrollment.enroll(self.user, course.id)
url = course_home_url(course)
response = self.client.get(url)
content = response.content.decode('utf8')
self.assertRegex(content, sequential.display_name + r'\s*</h4>')
self.assertRegex(content, sequential2.display_name + r'\s*\(1 Question\)\s*</h4>')
self.assertRegex(content, sequential3.display_name + r'\s*\(2 Questions\)\s*</h4>')
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
@ddt.data(
([CourseMode.AUDIT, CourseMode.VERIFIED], CourseMode.AUDIT, False, True),
([CourseMode.AUDIT, CourseMode.VERIFIED], CourseMode.VERIFIED, False, True),
([CourseMode.MASTERS], CourseMode.MASTERS, False, True),
([CourseMode.PROFESSIONAL], CourseMode.PROFESSIONAL, True, True), # staff accounts should also see the banner
)
@ddt.unpack
def test_reset_course_deadlines_banner_shows_for_self_paced_course(
self,
course_modes,
enrollment_mode,
is_course_staff,
should_display
):
ContentTypeGatingConfig.objects.create(
enabled=True,
enabled_as_of=datetime.datetime(2017, 1, 1, tzinfo=UTC),
)
course = self.courses[0]
for mode in course_modes:
CourseModeFactory.create(course_id=course.id, mode_slug=mode)
enrollment = CourseEnrollment.objects.get(course_id=course.id, user=self.user)
enrollment.mode = enrollment_mode
enrollment.save()
enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=30)
enrollment.schedule.save()
self.user.is_staff = is_course_staff
self.user.save()
url = course_home_url(course)
response = self.client.get(url)
if should_display:
self.assertContains(response, '<div class="banner-cta-text"')
else:
self.assertNotContains(response, '<div class="banner-cta-text"')
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_reset_course_deadlines(self):
course = self.courses[0]
staff = StaffFactory(course_key=course.id)
CourseEnrollment.enroll(staff, course.id)
start_date = timezone.now() - datetime.timedelta(days=30)
Schedule.objects.update(start_date=start_date)
self.client.login(username=staff.username, password=TEST_PASSWORD)
self.update_masquerade(course=course, username=self.user.username)
post_dict = {'course_id': str(course.id)}
self.client.post(reverse(RESET_COURSE_DEADLINES_NAME), post_dict)
updated_schedule = Schedule.objects.get(enrollment__user=self.user, enrollment__course_id=course.id)
assert updated_schedule.start_date.date() == datetime.datetime.today().date()
updated_staff_schedule = Schedule.objects.get(enrollment__user=staff, enrollment__course_id=course.id)
assert updated_staff_schedule.start_date == start_date
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
def test_reset_course_deadlines_masquerade_generic_student(self):
course = self.courses[0]
staff = StaffFactory(course_key=course.id)
CourseEnrollment.enroll(staff, course.id)
start_date = timezone.now() - datetime.timedelta(days=30)
Schedule.objects.update(start_date=start_date)
self.client.login(username=staff.username, password=TEST_PASSWORD)
self.update_masquerade(course=course)
post_dict = {'course_id': str(course.id)}
self.client.post(reverse(RESET_COURSE_DEADLINES_NAME), post_dict)
updated_student_schedule = Schedule.objects.get(enrollment__user=self.user, enrollment__course_id=course.id)
assert updated_student_schedule.start_date == start_date
updated_staff_schedule = Schedule.objects.get(enrollment__user=staff, enrollment__course_id=course.id)
assert updated_staff_schedule.start_date.date() == datetime.date.today()
class TestCourseOutlinePageWithPrerequisites(SharedModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Test the course outline view with prerequisites.
"""
TRANSFORMER_CLASS_TO_TEST = MilestonesAndSpecialExamsTransformer
@classmethod
def setUpClass(cls):
"""
Creates a test course that can be used for non-destructive tests
"""
# pylint: disable=super-method-not-called
cls.PREREQ_REQUIRED = '(Prerequisite required)'
cls.UNLOCKED = 'Unlocked'
with super().setUpClassAndTestData():
cls.course, cls.course_blocks = cls.create_test_course()
@classmethod
def setUpTestData(cls): # lint-amnesty, pylint: disable=super-method-not-called
"""Set up and enroll our fake user in the course."""
cls.user = UserFactory(password=TEST_PASSWORD)
CourseEnrollment.enroll(cls.user, cls.course.id)
@classmethod
def create_test_course(cls):
"""Creates a test course."""
course = CourseFactory.create()
course.enable_subsection_gating = True
course_blocks = {}
with cls.store.bulk_operations(course.id):
course_blocks['chapter'] = ItemFactory.create(
category='chapter',
parent_location=course.location
)
course_blocks['prerequisite'] = ItemFactory.create(
category='sequential',
parent_location=course_blocks['chapter'].location,
display_name='Prerequisite Exam'
)
course_blocks['gated_content'] = ItemFactory.create(
category='sequential',
parent_location=course_blocks['chapter'].location,
display_name='Gated Content'
)
course_blocks['prerequisite_vertical'] = ItemFactory.create(
category='vertical',
parent_location=course_blocks['prerequisite'].location
)
course_blocks['gated_content_vertical'] = ItemFactory.create(
category='vertical',
parent_location=course_blocks['gated_content'].location
)
course.children = [course_blocks['chapter']]
course_blocks['chapter'].children = [course_blocks['prerequisite'], course_blocks['gated_content']]
course_blocks['prerequisite'].children = [course_blocks['prerequisite_vertical']]
course_blocks['gated_content'].children = [course_blocks['gated_content_vertical']]
if hasattr(cls, 'user'):
CourseEnrollment.enroll(cls.user, course.id)
return course, course_blocks
def setUp(self):
"""
Set up for the tests.
"""
super().setUp()
self.client.login(username=self.user.username, password=TEST_PASSWORD)
def setup_gated_section(self, gated_block, gating_block):
"""
Test helper to create a gating requirement
Args:
gated_block: The block the that learner will not have access to until they complete the gating block
gating_block: (The prerequisite) The block that must be completed to get access to the gated block
"""
gating_api.add_prerequisite(self.course.id, str(gating_block.location))
gating_api.set_required_content(self.course.id, gated_block.location, gating_block.location, 100)
def test_content_locked(self):
"""
Test that a sequential/subsection with unmet prereqs correctly indicated that its content is locked
"""
course = self.course
self.setup_gated_section(self.course_blocks['gated_content'], self.course_blocks['prerequisite'])
response = self.client.get(course_home_url(course))
assert response.status_code == 200
response_content = pq(response.content)
# check lock icon is present
lock_icon = response_content('.fa-lock')
assert lock_icon, 'lock icon is not present, but should be'
subsection = lock_icon.parents('.subsection-text')
# check that subsection-title-name is the display name
gated_subsection_title = self.course_blocks['gated_content'].display_name
assert gated_subsection_title in subsection.children('.subsection-title').html()
# check that it says prerequisite required
assert 'Prerequisite:' in subsection.children('.details').html()
# check that there is not a screen reader message
assert not subsection.children('.sr')
def test_content_unlocked(self):
"""
Test that a sequential/subsection with met prereqs correctly indicated that its content is unlocked
"""
course = self.course
self.setup_gated_section(self.course_blocks['gated_content'], self.course_blocks['prerequisite'])
# complete the prerequisite to unlock the gated content
# this call triggers reevaluation of prerequisites fulfilled by the gating block.
with patch('openedx.core.lib.gating.api.get_subsection_completion_percentage', Mock(return_value=100)):
lms_gating_api.evaluate_prerequisite(
self.course,
Mock(location=self.course_blocks['prerequisite'].location, percent_graded=1.0),
self.user,
)
response = self.client.get(course_home_url(course))
assert response.status_code == 200
response_content = pq(response.content)
# check unlock icon is not present
unlock_icon = response_content('.fa-unlock')
assert not unlock_icon, "unlock icon is present, yet shouldn't be."
gated_subsection_title = self.course_blocks['gated_content'].display_name
every_subsection_on_outline = response_content('.subsection-title')
subsection_has_gated_text = False
says_prerequisite_required = False
for subsection_contents in every_subsection_on_outline.contents():
subsection_has_gated_text = gated_subsection_title in subsection_contents
says_prerequisite_required = "Prerequisite:" in subsection_contents
# check that subsection-title-name is the display name of gated content section
assert subsection_has_gated_text
assert not says_prerequisite_required
class TestCourseOutlineResumeCourse(SharedModuleStoreTestCase, CompletionWaffleTestMixin):
"""
Test start course and resume course for the course outline view.
Technically, this mixes course home and course outline tests, but checking
the counts of start/resume course should be done together to avoid false
positives.
"""
@classmethod
def setUpClass(cls):
"""
Creates a test course that can be used for non-destructive tests
"""
# setUpClassAndTestData() already calls setUpClass on SharedModuleStoreTestCase
# pylint: disable=super-method-not-called
with super().setUpClassAndTestData():
cls.course = cls.create_test_course()
@classmethod
def setUpTestData(cls): # lint-amnesty, pylint: disable=super-method-not-called
"""Set up and enroll our fake user in the course."""
cls.user = UserFactory(password=TEST_PASSWORD)
CourseEnrollment.enroll(cls.user, cls.course.id)
cls.site = Site.objects.get_current()
@classmethod
def create_test_course(cls):
"""
Creates a test course.
"""
course = CourseFactory.create()
with cls.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
chapter2 = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
sequential2 = ItemFactory.create(category='sequential', parent_location=chapter.location)
sequential3 = ItemFactory.create(category='sequential', parent_location=chapter2.location)
sequential4 = ItemFactory.create(category='sequential', parent_location=chapter2.location)
vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
vertical2 = ItemFactory.create(category='vertical', parent_location=sequential2.location)
vertical3 = ItemFactory.create(category='vertical', parent_location=sequential3.location)
vertical4 = ItemFactory.create(category='vertical', parent_location=sequential4.location)
problem = ItemFactory.create(category='problem', parent_location=vertical.location)
problem2 = ItemFactory.create(category='problem', parent_location=vertical2.location)
problem3 = ItemFactory.create(category='problem', parent_location=vertical3.location)
course.children = [chapter, chapter2]
chapter.children = [sequential, sequential2]
chapter2.children = [sequential3, sequential4]
sequential.children = [vertical]
sequential2.children = [vertical2]
sequential3.children = [vertical3]
sequential4.children = [vertical4]
vertical.children = [problem]
vertical2.children = [problem2]
vertical3.children = [problem3]
if hasattr(cls, 'user'):
CourseEnrollment.enroll(cls.user, course.id)
return course
def setUp(self):
"""
Set up for the tests.
"""
super().setUp()
self.client.login(username=self.user.username, password=TEST_PASSWORD)
def visit_sequential(self, course, chapter, sequential):
"""
Navigates to the provided sequential.
"""
last_accessed_url = reverse(
'courseware_section',
kwargs={
'course_id': str(course.id),
'chapter': chapter.url_name,
'section': sequential.url_name,
}
)
assert 200 == self.client.get(last_accessed_url).status_code
@override_waffle_switch(ENABLE_COMPLETION_TRACKING_SWITCH, active=True)
def complete_sequential(self, course, sequential):
"""
Completes provided sequential.
"""
course_key = CourseKey.from_string(str(course.id))
        # Submit a completion record for the given sequential
block_key = UsageKey.from_string(str(sequential.location))
if block_key.course_key.run is None:
# Old mongo keys must be annotated with course run info before calling submit_completion:
block_key = block_key.replace(course_key=course_key)
completion = 1.0
BlockCompletion.objects.submit_completion(
user=self.user,
block_key=block_key,
completion=completion
)
def visit_course_home(self, course, start_count=0, resume_count=0):
"""
Helper function to navigates to course home page, test for resume buttons
:param course: course factory object
:param start_count: number of times 'Start Course' should appear
:param resume_count: number of times 'Resume Course' should appear
:return: response object
"""
response = self.client.get(course_home_url(course))
assert response.status_code == 200
self.assertContains(response, 'Start Course', count=start_count)
self.assertContains(response, 'Resume Course', count=resume_count)
return response
def test_course_home_completion(self):
"""
Test that completed blocks appear checked on course home page
"""
self.override_waffle_switch(True)
course = self.course
vertical = course.children[0].children[0].children[0]
response = self.client.get(course_home_url(course))
content = pq(response.content)
assert len(content('.fa-check')) == 0
self.complete_sequential(self.course, vertical)
response = self.client.get(course_home_url(course))
content = pq(response.content)
# Subsection should be checked. Subsection 4 is also checked because it contains a vertical with no content
assert len(content('.fa-check')) == 2
def test_start_course(self):
"""
Tests that the start course button appears when the course has never been accessed.
Technically, this is a course home test, and not a course outline test, but checking the counts of
start/resume course should be done together to not get a false positive.
"""
course = self.course
response = self.visit_course_home(course, start_count=1, resume_count=0)
content = pq(response.content)
problem = course.children[0].children[0].children[0].children[0]
assert content('.action-resume-course').attr('href').endswith('/problem/' + problem.url_name)
@override_settings(LMS_BASE='test_url:9999')
def test_resume_course_with_completion_api(self):
"""
Tests completion API resume button functionality
"""
self.override_waffle_switch(True)
# Course tree
course = self.course
problem1 = course.children[0].children[0].children[0].children[0]
problem2 = course.children[0].children[1].children[0].children[0]
self.complete_sequential(self.course, problem1)
# Test for 'resume' link
response = self.visit_course_home(course, resume_count=1)
# Test for 'resume' link URL - should be problem 1
content = pq(response.content)
assert content('.action-resume-course').attr('href').endswith('/problem/' + problem1.url_name)
self.complete_sequential(self.course, problem2)
# Test for 'resume' link
response = self.visit_course_home(course, resume_count=1)
# Test for 'resume' link URL - should be problem 2
content = pq(response.content)
assert content('.action-resume-course').attr('href').endswith('/problem/' + problem2.url_name)
# visit sequential 1, make sure 'Resume Course' URL is robust against 'Last Visited'
# (even though I visited seq1/vert1, 'Resume Course' still points to seq2/vert2)
self.visit_sequential(course, course.children[0], course.children[0].children[0])
# Test for 'resume' link URL - should be problem 2 (last completed block, NOT last visited)
response = self.visit_course_home(course, resume_count=1)
content = pq(response.content)
assert content('.action-resume-course').attr('href').endswith('/problem/' + problem2.url_name)
def test_resume_course_deleted_sequential(self):
"""
Tests resume course when the last completed sequential is deleted and
there is another sequential in the vertical.
"""
course = self.create_test_course()
# first navigate to a sequential to make it the last accessed
chapter = course.children[0]
assert len(chapter.children) >= 2
sequential = chapter.children[0]
sequential2 = chapter.children[1]
self.complete_sequential(course, sequential)
self.complete_sequential(course, sequential2)
# remove one of the sequentials from the chapter
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
self.store.delete_item(sequential.location, self.user.id)
# check resume course buttons
response = self.visit_course_home(course, resume_count=1)
content = pq(response.content)
assert content('.action-resume-course').attr('href').endswith('/sequential/' + sequential2.url_name)
def test_resume_course_deleted_sequentials(self):
"""
Tests resume course when the last completed sequential is deleted and
there are no sequentials left in the vertical.
"""
course = self.create_test_course()
# first navigate to a sequential to make it the last accessed
chapter = course.children[0]
assert len(chapter.children) == 2
sequential = chapter.children[0]
self.complete_sequential(course, sequential)
# remove all sequentials from chapter
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
for sequential in chapter.children:
self.store.delete_item(sequential.location, self.user.id)
# check resume course buttons
self.visit_course_home(course, start_count=1, resume_count=0)
def test_course_home_for_global_staff(self):
"""
Tests that staff user can access the course home without being enrolled
in the course.
"""
course = self.course
self.user.is_staff = True
self.user.save()
self.override_waffle_switch(True)
CourseEnrollment.get_enrollment(self.user, course.id).delete()
response = self.visit_course_home(course, start_count=1, resume_count=0)
content = pq(response.content)
problem = course.children[0].children[0].children[0].children[0]
assert content('.action-resume-course').attr('href').endswith('/problem/' + problem.url_name)
@override_waffle_switch(ENABLE_COMPLETION_TRACKING_SWITCH, active=True)
def test_course_outline_auto_open(self):
"""
Tests that the course outline auto-opens to the first subsection
in a course if a user has no completion data, and to the
last-accessed subsection if a user does have completion data.
"""
def get_sequential_button(url, is_hidden):
is_hidden_string = "is-hidden" if is_hidden else ""
return "<olclass=\"outline-itemaccordion-panel" + is_hidden_string + "\"" \
"id=\"" + url + "_contents\"" \
"aria-labelledby=\"" + url + "\"" \
">"
# Course tree
course = self.course
chapter1 = course.children[0]
chapter2 = course.children[1]
response_content = self.client.get(course_home_url(course)).content
stripped_response = str(re.sub(b"\\s+", b"", response_content), "utf-8")
assert get_sequential_button(str(chapter1.location), False) in stripped_response
assert get_sequential_button(str(chapter2.location), True) in stripped_response
content = pq(response_content)
button = content('#expand-collapse-outline-all-button')
assert 'Expand All' == button.children()[0].text
def test_user_enrolled_after_completion_collection(self):
"""
Tests that the _completion_data_collection_start() method returns the created
time of the waffle switch that enables completion data tracking.
"""
view = CourseOutlineFragmentView()
switch_name = ENABLE_COMPLETION_TRACKING_SWITCH.name
switch, _ = Switch.objects.get_or_create(name=switch_name)
# pylint: disable=protected-access
assert switch.created == view._completion_data_collection_start()
switch.delete()
def test_user_enrolled_after_completion_collection_default(self):
"""
Tests that the _completion_data_collection_start() method returns a default constant
when no Switch object exists for completion data tracking.
"""
view = CourseOutlineFragmentView()
# pylint: disable=protected-access
assert DEFAULT_COMPLETION_TRACKING_START == view._completion_data_collection_start()
class TestCourseOutlinePreview(SharedModuleStoreTestCase, MasqueradeMixin):
"""
Unit tests for staff preview of the course outline.
"""
def test_preview(self):
"""
Verify the behavior of preview for the course outline.
"""
course = CourseFactory.create(
start=datetime.datetime.now() - datetime.timedelta(days=30)
)
staff_user = StaffFactory(course_key=course.id, password=TEST_PASSWORD)
CourseEnrollment.enroll(staff_user, course.id)
future_date = datetime.datetime.now() + datetime.timedelta(days=30)
with self.store.bulk_operations(course.id):
chapter = ItemFactory.create(
category='chapter',
parent_location=course.location,
display_name='First Chapter',
)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
ItemFactory.create(category='vertical', parent_location=sequential.location)
chapter = ItemFactory.create(
category='chapter',
parent_location=course.location,
display_name='Future Chapter',
start=future_date,
)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
ItemFactory.create(category='vertical', parent_location=sequential.location)
# Verify that a staff user sees a chapter with a due date in the future
        self.client.login(username=staff_user.username, password=TEST_PASSWORD)
url = course_home_url(course)
response = self.client.get(url)
assert response.status_code == 200
self.assertContains(response, 'Future Chapter')
# Verify that staff masquerading as a learner see the future chapter.
self.update_masquerade(course=course, role='student')
response = self.client.get(url)
assert response.status_code == 200
self.assertContains(response, 'Future Chapter')
| agpl-3.0 |
rgommers/statsmodels | statsmodels/tsa/filters/bk_filter.py | 28 | 3112 | from __future__ import absolute_import
import numpy as np
from scipy.signal import fftconvolve
from ._utils import _maybe_get_pandas_wrapper
def bkfilter(X, low=6, high=32, K=12):
"""
Baxter-King bandpass filter
Parameters
----------
X : array-like
A 1 or 2d ndarray. If 2d, variables are assumed to be in columns.
low : float
Minimum period for oscillations, ie., Baxter and King suggest that
the Burns-Mitchell U.S. business cycle has 6 for quarterly data and
1.5 for annual data.
high : float
Maximum period for oscillations BK suggest that the U.S.
business cycle has 32 for quarterly data and 8 for annual data.
K : int
Lead-lag length of the filter. Baxter and King propose a truncation
length of 12 for quarterly data and 3 for annual data.
Returns
-------
Y : array
Cyclical component of X
References
---------- ::
Baxter, M. and R. G. King. "Measuring Business Cycles: Approximate
Band-Pass Filters for Economic Time Series." *Review of Economics and
Statistics*, 1999, 81(4), 575-593.
Notes
-----
Returns a centered weighted moving average of the original series. Where
the weights a[j] are computed ::
a[j] = b[j] + theta, for j = 0, +/-1, +/-2, ... +/- K
b[0] = (omega_2 - omega_1)/pi
      b[j] = 1/(pi*j)*(sin(omega_2*j)-sin(omega_1*j)), for j = +/-1, +/-2,...
and theta is a normalizing constant ::
theta = -sum(b)/(2K+1)
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.macrodata.load_pandas().data
>>> dates = sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3')
>>> index = pd.DatetimeIndex(dates)
>>> dta.set_index(index, inplace=True)
>>> cycles = sm.tsa.filters.bkfilter(dta[['realinv']], 6, 24, 12)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> cycles.plot(ax=ax, style=['r--', 'b-'])
>>> plt.show()
.. plot:: plots/bkf_plot.py
"""
#TODO: change the docstring to ..math::?
    #TODO: allow windowing functions to correct for Gibbs phenomenon?
    # adjust bweights (symmetrically) by below before demeaning
    # Lanczos sigma factors np.sinc(2*j/(2.*K+1))
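    # A possible sketch of that correction (not applied here; hypothetical,
    # assuming the Lanczos sigma factors scale the symmetric weights built
    # below before they are demeaned):
    #   j = np.arange(1, int(K) + 1)
    #   sigma = np.sinc(2 * j / (2. * K + 1))
    #   bweights[K + j] *= sigma
    #   bweights[K - j] *= sigma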
_pandas_wrapper = _maybe_get_pandas_wrapper(X, K, K)
X = np.asarray(X)
    omega_1 = 2.*np.pi/high # convert periodicity to angular frequency
omega_2 = 2.*np.pi/low
bweights = np.zeros(2*K+1)
bweights[K] = (omega_2 - omega_1)/np.pi # weight at zero freq.
j = np.arange(1,int(K)+1)
weights = 1/(np.pi*j)*(np.sin(omega_2*j)-np.sin(omega_1*j))
bweights[K+j] = weights # j is an idx
bweights[:K] = weights[::-1] # make symmetric weights
bweights -= bweights.mean() # make sure weights sum to zero
if X.ndim == 2:
bweights = bweights[:,None]
X = fftconvolve(X, bweights, mode='valid') # get a centered moving avg/
# convolution
if _pandas_wrapper is not None:
return _pandas_wrapper(X)
return X
| bsd-3-clause |
jeffreyliu3230/osf.io | website/addons/dataverse/tests/test_client.py | 16 | 8789 | from nose.tools import *
import mock
import unittest
from dataverse import Connection, Dataverse, DataverseFile, Dataset
from dataverse.exceptions import UnauthorizedError
from framework.exceptions import HTTPError
from website.addons.dataverse.tests.utils import DataverseAddonTestCase
from website.addons.dataverse.tests.utils import create_external_account
from website.addons.dataverse.client import (
_connect, get_files, publish_dataset, get_datasets, get_dataset,
get_dataverses, get_dataverse, connect_from_settings, connect_or_401,
connect_from_settings_or_401,
)
from website.addons.dataverse.model import AddonDataverseNodeSettings
class TestClient(DataverseAddonTestCase):
def setUp(self):
super(TestClient, self).setUp()
self.host = 'some.host.url'
self.token = 'some-fancy-api-token-which-is-long'
self.mock_connection = mock.create_autospec(Connection)
self.mock_dataverse = mock.create_autospec(Dataverse)
self.mock_dataset = mock.create_autospec(Dataset)
self.mock_file = mock.create_autospec(DataverseFile)
self.mock_file.dataset = self.mock_dataset
self.mock_dataset.dataverse = self.mock_dataverse
self.mock_dataverse.connection = self.mock_connection
@mock.patch('website.addons.dataverse.client.Connection')
def test_connect(self, mock_connection):
mock_connection.return_value = mock.create_autospec(Connection)
c = _connect(self.host, self.token)
mock_connection.assert_called_once_with(self.host, self.token)
assert_true(c)
@mock.patch('website.addons.dataverse.client.Connection')
def test_connect_fail(self, mock_connection):
mock_connection.side_effect = UnauthorizedError()
with assert_raises(UnauthorizedError):
_connect(self.host, self.token)
mock_connection.assert_called_once_with(self.host, self.token)
@mock.patch('website.addons.dataverse.client.Connection')
def test_connect_or_401(self, mock_connection):
mock_connection.return_value = mock.create_autospec(Connection)
c = connect_or_401(self.host, self.token)
mock_connection.assert_called_once_with(self.host, self.token)
assert_true(c)
@mock.patch('website.addons.dataverse.client.Connection')
def test_connect_or_401_forbidden(self, mock_connection):
mock_connection.side_effect = UnauthorizedError()
with assert_raises(HTTPError) as cm:
connect_or_401(self.host, self.token)
mock_connection.assert_called_once_with(self.host, self.token)
assert_equal(cm.exception.code, 401)
@mock.patch('website.addons.dataverse.client._connect')
def test_connect_from_settings(self, mock_connect):
node_settings = AddonDataverseNodeSettings()
node_settings.external_account = create_external_account(
self.host, self.token,
)
connection = connect_from_settings(node_settings)
assert_true(connection)
mock_connect.assert_called_once_with(self.host, self.token)
def test_connect_from_settings_none(self):
connection = connect_from_settings(None)
assert_is_none(connection)
@mock.patch('website.addons.dataverse.client._connect')
def test_connect_from_settings_or_401(self, mock_connect):
node_settings = AddonDataverseNodeSettings()
node_settings.external_account = create_external_account(
self.host, self.token,
)
connection = connect_from_settings_or_401(node_settings)
assert_true(connection)
mock_connect.assert_called_once_with(self.host, self.token)
def test_connect_from_settings_or_401_none(self):
connection = connect_from_settings_or_401(None)
assert_is_none(connection)
@mock.patch('website.addons.dataverse.client.Connection')
def test_connect_from_settings_or_401_forbidden(self, mock_connection):
mock_connection.side_effect = UnauthorizedError()
node_settings = AddonDataverseNodeSettings()
node_settings.external_account = create_external_account(
self.host, self.token,
)
with assert_raises(HTTPError) as e:
connect_from_settings_or_401(node_settings)
mock_connection.assert_called_once_with(self.host, self.token)
assert_equal(e.exception.code, 401)
def test_get_files(self):
published = False
get_files(self.mock_dataset, published)
self.mock_dataset.get_files.assert_called_once_with('latest')
def test_get_files_published(self):
published = True
get_files(self.mock_dataset, published)
self.mock_dataset.get_files.assert_called_once_with('latest-published')
def test_publish_dataset(self):
publish_dataset(self.mock_dataset)
self.mock_dataset.publish.assert_called_once_with()
def test_publish_dataset_unpublished_dataverse(self):
type(self.mock_dataverse).is_published = mock.PropertyMock(return_value=False)
with assert_raises(HTTPError) as e:
publish_dataset(self.mock_dataset)
assert_false(self.mock_dataset.publish.called)
assert_equal(e.exception.code, 405)
def test_get_datasets(self):
mock_dataset1 = mock.create_autospec(Dataset)
mock_dataset2 = mock.create_autospec(Dataset)
mock_dataset3 = mock.create_autospec(Dataset)
mock_dataset1.get_state.return_value = 'DRAFT'
mock_dataset2.get_state.return_value = 'RELEASED'
mock_dataset3.get_state.return_value = 'DEACCESSIONED'
self.mock_dataverse.get_datasets.return_value = [
mock_dataset1, mock_dataset2, mock_dataset3
]
datasets = get_datasets(self.mock_dataverse)
self.mock_dataverse.get_datasets.assert_called_once_with()
assert_in(mock_dataset1, datasets)
assert_in(mock_dataset2, datasets)
assert_in(mock_dataset3, datasets)
def test_get_datasets_no_dataverse(self):
datasets = get_datasets(None)
assert_equal(datasets, [])
def test_get_dataset(self):
self.mock_dataset.get_state.return_value = 'DRAFT'
self.mock_dataverse.get_dataset_by_doi.return_value = self.mock_dataset
s = get_dataset(self.mock_dataverse, 'My hdl')
self.mock_dataverse.get_dataset_by_doi.assert_called_once_with('My hdl')
assert_equal(s, self.mock_dataset)
def test_get_deaccessioned_dataset(self):
self.mock_dataset.get_state.return_value = 'DEACCESSIONED'
self.mock_dataverse.get_dataset_by_doi.return_value = self.mock_dataset
with assert_raises(HTTPError) as e:
s = get_dataset(self.mock_dataverse, 'My hdl')
self.mock_dataverse.get_dataset_by_doi.assert_called_once_with('My hdl')
assert_equal(e.exception.code, 410)
def test_get_bad_dataset(self):
error = UnicodeDecodeError('utf-8', b'', 1, 2, 'jeepers')
self.mock_dataset.get_state.side_effect = error
self.mock_dataverse.get_dataset_by_doi.return_value = self.mock_dataset
with assert_raises(HTTPError) as e:
s = get_dataset(self.mock_dataverse, 'My hdl')
self.mock_dataverse.get_dataset_by_doi.assert_called_once_with('My hdl')
assert_equal(e.exception.code, 406)
def test_get_dataverses(self):
published_dv = mock.create_autospec(Dataverse)
unpublished_dv = mock.create_autospec(Dataverse)
type(published_dv).is_published = mock.PropertyMock(return_value=True)
type(unpublished_dv).is_published = mock.PropertyMock(return_value=False)
self.mock_connection.get_dataverses.return_value = [
published_dv, unpublished_dv
]
dvs = get_dataverses(self.mock_connection)
self.mock_connection.get_dataverses.assert_called_once_with()
assert_in(published_dv, dvs)
assert_in(unpublished_dv, dvs)
assert_equal(len(dvs), 2)
def test_get_dataverse(self):
type(self.mock_dataverse).is_published = mock.PropertyMock(return_value=True)
self.mock_connection.get_dataverse.return_value = self.mock_dataverse
d = get_dataverse(self.mock_connection, 'ALIAS')
self.mock_connection.get_dataverse.assert_called_once_with('ALIAS')
assert_equal(d, self.mock_dataverse)
def test_get_unpublished_dataverse(self):
type(self.mock_dataverse).is_published = mock.PropertyMock(return_value=False)
self.mock_connection.get_dataverse.return_value = self.mock_dataverse
d = get_dataverse(self.mock_connection, 'ALIAS')
self.mock_connection.get_dataverse.assert_called_once_with('ALIAS')
assert_equal(d, self.mock_dataverse)
| apache-2.0 |
gdementen/larray | larray/tests/generate_data.py | 2 | 9260 | import os
from larray import ndtest, open_excel, Session, X
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
def generate_tests_files():
tests = {'1d': 3,
'2d': "a=1..3; b=b0,b1",
'2d_classic': "a=a0..a2;b=b0..b2",
'3d': "a=1..3; b=b0,b1; c=c0..c2",
'int_labels': "a=0..2; b=0..2; c=0..2",
'missing_values': "a=1..3; b=b0,b1; c=c0..c2",
'unsorted': "a=3..1; b=b1,b0; c=c2..c0",
'position': "a=1..3; b=b0,b1; c=c0..c2"}
wb = open_excel(os.path.join(DATA_DIR, 'test.xlsx'), overwrite_file=True)
wb_narrow = open_excel(os.path.join(DATA_DIR, 'test_narrow.xlsx'), overwrite_file=True)
for name, dim in tests.items():
arr = ndtest(dim)
if name == '2d_classic':
df = arr.to_frame(fold_last_axis_name=False)
# wide format
df.to_csv(os.path.join(DATA_DIR, f'test{name}.csv'), sep=',', na_rep='')
wb[name] = ''
wb[name]['A1'].options().value = df
# narrow format
df = arr.to_series(name='value')
df.to_csv(os.path.join(DATA_DIR, f'test{name}_narrow.csv'), sep=',', na_rep='', header=True)
wb_narrow[name] = ''
wb_narrow[name]['A1'].options().value = df
elif name == 'missing_values':
df = arr.to_frame(fold_last_axis_name=True)
# wide format
df = df.drop([(2, 'b0'), (3, 'b1')])
df.to_csv(os.path.join(DATA_DIR, f'test{name}.csv'), sep=',', na_rep='')
wb[name] = ''
wb[name]['A1'].options().value = df
# narrow format
df = arr.to_series(name='value')
df = df.drop([(2, 'b0'), (2, 'b1', 'c1'), (3, 'b1')])
df.to_csv(os.path.join(DATA_DIR, f'test{name}_narrow.csv'), sep=',', na_rep='', header=True)
wb_narrow[name] = ''
wb_narrow[name]['A1'].options().value = df
elif name == 'position':
# wide format
wb[name] = ''
wb[name]['D3'] = arr.dump()
# narrow format
wb_narrow[name] = ''
wb_narrow[name]['D3'] = arr.dump(wide=False)
else:
# wide format
arr.to_csv(os.path.join(DATA_DIR, f'test{name}.csv'))
wb[name] = arr.dump()
# narrow format
arr.to_csv(os.path.join(DATA_DIR, f'test{name}_narrow.csv'), wide=False)
wb_narrow[name] = arr.dump(wide=False)
wb.save()
wb.close()
wb_narrow.save()
wb_narrow.close()
def generate_example_files(csv=True, excel=True, hdf5=True):
from larray_eurostat import eurostat_get
def prepare_eurostat_data(dataset_name, countries):
arr = eurostat_get(dataset_name)[X.unit['NR'], X.age['TOTAL'], X.sex['M,F']]
arr = arr[X.time[::-1]][2013:2017]
arr = arr.rename('sex', 'gender')
arr = arr.set_labels(gender='Male,Female')
arr = arr.rename('geo', 'country')
country_codes = list(countries.keys())
country_names = list(countries.values())
if dataset_name == 'migr_imm1ctz':
# example of an array with ambiguous axes
arr = arr['COMPLET', X.citizen[country_codes], X.country[country_codes]].astype(int)
arr = arr.rename('citizen', 'citizenship')
arr = arr.set_labels('citizenship', country_names)
arr = arr.set_labels('country', country_names)
arr = arr.transpose('country', 'citizenship', 'gender', 'time')
else:
arr = arr[country_codes].astype(int)
arr = arr.set_labels('country', country_names)
arr = arr.transpose('country', 'gender', 'time')
return arr
countries = {'BE': 'Belgium', 'FR': 'France', 'DE': 'Germany'}
benelux = {'BE': 'Belgium', 'LU': 'Luxembourg', 'NL': 'Netherlands'}
# Arrays
population = prepare_eurostat_data('demo_pjan', countries)
population.meta.title = 'Population on 1 January by age and sex'
population.meta.source = 'table demo_pjan from Eurostat'
# ----
population_benelux = prepare_eurostat_data('demo_pjan', benelux)
population_benelux.meta.title = 'Population on 1 January by age and sex (Benelux)'
population_benelux.meta.source = 'table demo_pjan from Eurostat'
# ----
population_5_countries = population.extend('country', population_benelux[['Luxembourg', 'Netherlands']])
population_5_countries.meta.title = 'Population on 1 January by age and sex (Benelux + France + Germany)'
population_5_countries.meta.source = 'table demo_pjan from Eurostat'
# ----
births = prepare_eurostat_data('demo_fasec', countries)
births.meta.title = "Live births by mother's age and newborn's sex"
births.meta.source = 'table demo_fasec from Eurostat'
# ----
deaths = prepare_eurostat_data('demo_magec', countries)
deaths.meta.title = 'Deaths by age and sex'
deaths.meta.source = 'table demo_magec from Eurostat'
# ----
immigration = prepare_eurostat_data('migr_imm1ctz', benelux)
immigration.meta.title = 'Immigration by age group, sex and citizenship'
immigration.meta.source = 'table migr_imm1ctz from Eurostat'
# Groups
even_years = population.time[2014::2] >> 'even_years'
odd_years = population.time[2013::2] >> 'odd_years'
# Session
ses = Session({'country': population.country, 'country_benelux': immigration.country,
'citizenship': immigration.citizenship,
'gender': population.gender, 'time': population.time,
'even_years': even_years, 'odd_years': odd_years,
'population': population, 'population_benelux': population_benelux,
'population_5_countries': population_5_countries,
'births': births, 'deaths': deaths, 'immigration': immigration})
ses.meta.title = 'Demographic datasets for a small selection of countries in Europe'
    ses.meta.source = 'demo_pjan, demo_fasec, demo_magec and migr_imm1ctz tables from Eurostat'
# EUROSTAT DATASET
if csv:
ses.save(os.path.join(DATA_DIR, 'demography_eurostat'))
if excel:
ses.save(os.path.join(DATA_DIR, 'demography_eurostat.xlsx'))
if hdf5:
ses.save(os.path.join(DATA_DIR, 'demography_eurostat.h5'))
# EXAMPLE FILES
years = population.time[2013:2015]
population = population[years]
population_narrow = population['Belgium,France'].sum('gender')
births = births[years]
deaths = deaths[years]
immigration = immigration[years]
# Dataframes (for testing missing axis/values)
df_missing_axis_name = population.to_frame(fold_last_axis_name=False)
df_missing_values = population.to_frame(fold_last_axis_name=True)
df_missing_values.drop([('France', 'Male'), ('Germany', 'Female')], inplace=True)
if csv:
examples_dir = os.path.join(DATA_DIR, 'examples')
population.to_csv(os.path.join(examples_dir, 'population.csv'))
births.to_csv(os.path.join(examples_dir, 'births.csv'))
deaths.to_csv(os.path.join(examples_dir, 'deaths.csv'))
immigration.to_csv(os.path.join(examples_dir, 'immigration.csv'))
df_missing_axis_name.to_csv(os.path.join(examples_dir, 'population_missing_axis_name.csv'), sep=',', na_rep='')
df_missing_values.to_csv(os.path.join(examples_dir, 'population_missing_values.csv'), sep=',', na_rep='')
population_narrow.to_csv(os.path.join(examples_dir, 'population_narrow_format.csv'), wide=False)
if excel:
with open_excel(os.path.join(DATA_DIR, 'examples.xlsx'), overwrite_file=True) as wb:
wb['population'] = population.dump()
wb['births'] = births.dump()
wb['deaths'] = deaths.dump()
wb['immigration'] = immigration.dump()
wb['population_births_deaths'] = population.dump()
wb['population_births_deaths']['A9'] = births.dump()
wb['population_births_deaths']['A17'] = deaths.dump()
wb['population_missing_axis_name'] = ''
wb['population_missing_axis_name']['A1'].options().value = df_missing_axis_name
wb['population_missing_values'] = ''
wb['population_missing_values']['A1'].options().value = df_missing_values
# wb['population_narrow_format'] = population_narrow.dump(wide=False)
wb.save()
population_narrow.to_excel(os.path.join(DATA_DIR, 'examples.xlsx'), 'population_narrow_format', wide=False)
Session({'country': population.country, 'gender': population.gender, 'time': population.time,
'population': population}).save(os.path.join(DATA_DIR, 'population_only.xlsx'))
Session({'births': births, 'deaths': deaths}).save(os.path.join(DATA_DIR, 'births_and_deaths.xlsx'))
if hdf5:
examples_h5_file = os.path.join(DATA_DIR, 'examples.h5')
population.to_hdf(examples_h5_file, 'population')
births.to_hdf(examples_h5_file, 'births')
deaths.to_hdf(examples_h5_file, 'deaths')
immigration.to_hdf(examples_h5_file, 'immigration')
if __name__ == '__main__':
# generate_tests_files()
generate_example_files()
| gpl-3.0 |
LohithBlaze/scikit-learn | sklearn/utils/tests/test_multiclass.py | 128 | 12853 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
        # sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
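    # For reference: as a dense array y_sp matches y above -- column 1 stores an
    # explicit zero at row 0, column 2 contains only implicit zeros, and
    # columns 0 and 3 are fully populated.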
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
     class_prior_sp) = class_distribution(y_sp, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
Diyago/Machine-Learning-scripts | DEEP LEARNING/Kaggle Avito Demand Prediction Challenge/image feat. extraction/avito_deepIQA/deepIQA/evaluate_back.py | 1 | 2442 | #!/usr/bin/python2
import argparse
import cv2
import numpy as np
import six
from chainer import cuda
from chainer import serializers
from sklearn.feature_extraction.image import extract_patches
from deepIQA.fr_model import FRModel
from deepIQA.nr_model import Model
parser = argparse.ArgumentParser(description="evaluate.py")
parser.add_argument("INPUT", help="path to input image")
parser.add_argument(
"REF",
default="",
nargs="?",
help="path to reference image, if omitted NR IQA is assumed",
)
parser.add_argument("--model", "-m", default="", help="path to the trained model")
parser.add_argument(
"--top",
choices=("patchwise", "weighted"),
default="weighted",
help="top layer and loss definition",
)
parser.add_argument("--gpu", "-g", default=0, type=int, help="GPU ID")
args = parser.parse_args()
FR = True
if args.REF == "":
FR = False
if FR:
model = FRModel(top=args.top)
else:
model = Model(top=args.top)
cuda.cudnn_enabled = True
cuda.check_cuda_available()
xp = cuda.cupy
serializers.load_hdf5(args.model, model)
model.to_gpu()
if FR:
ref_img = cv2.imread(args.REF)
ref_img = cv2.cvtColor(ref_img, cv2.COLOR_BGR2RGB)
patches = extract_patches(ref_img, (32, 32, 3), 32)
X_ref = np.transpose(patches.reshape((-1, 32, 32, 3)), (0, 3, 1, 2))
img = cv2.imread(args.INPUT)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
patches = extract_patches(img, (32, 32, 3), 32)
X = np.transpose(patches.reshape((-1, 32, 32, 3)), (0, 3, 1, 2))
y = []
weights = []
batchsize = min(2000, X.shape[0])
t = xp.zeros((1, 1), np.float32)
for i in six.moves.range(0, X.shape[0], batchsize):
X_batch = X[i : i + batchsize]
X_batch = xp.array(X_batch.astype(np.float32))
if FR:
X_ref_batch = X_ref[i : i + batchsize]
X_ref_batch = xp.array(X_ref_batch.astype(np.float32))
model.forward(
X_batch, X_ref_batch, t, False, n_patches_per_image=X_batch.shape[0]
)
else:
model.forward(X_batch, t, False, X_batch.shape[0])
y.append(xp.asnumpy(model.y[0].data).reshape((-1,)))
weights.append(xp.asnumpy(model.a[0].data).reshape((-1,)))
y = np.concatenate(y)
weights = np.concatenate(weights)
print("%f" % (np.sum(y * weights) / np.sum(weights)))
"""
--model
models/nr_tid_patchwise.model
--top
patchwise
/home/alex/work/py/avito/input/train_jpg/0a0a5a3f22320e0508139273d23f390ca837aef252036034ed640fb939529bd9.jpg
"""
| apache-2.0 |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/resnet-tpuv3-8/code/resnet/model/tpu/models/official/amoeba_net/amoeba_net.py | 5 | 15390 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""TensorFlow AmoebaNet Example.
GCP Run Example
python amoeba_net.py --data_dir=gs://cloud-tpu-datasets/imagenet-data --model_dir=gs://cloud-tpu-ckpts/models/ameoba_net_x/ \
--drop_connect_keep_prob=1.0 --cell_name=evol_net_x --num_cells=12 --reduction_size=256 --image_size=299 --num_epochs=48 \
--train_batch_size=256 --num_epochs_per_eval=4.0 --lr_decay_value=0.89 --lr_num_epochs_per_decay=1 --alsologtostderr \
--tpu=huangyp-tpu-0
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import amoeba_net_model as model_lib
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
# General Parameters
flags.DEFINE_integer(
'num_shards', 8,
'Number of shards (TPU cores).')
flags.DEFINE_integer(
'distributed_group_size', 1,
    help='Size of the distributed batch norm group. '
    'Default is normalization over local examples only. '
    'When set to a value greater than 1, it will enable '
    'a distributed batch norm. To enable a global batch norm, '
    'set distributed_group_size to FLAGS.num_shards.')
flags.DEFINE_bool(
'use_tpu', True,
'Use TPUs rather than CPU or GPU.')
flags.DEFINE_string(
'data_dir', '',
'Directory where input data is stored')
flags.DEFINE_string(
'model_dir', None,
'Directory where model output is stored')
flags.DEFINE_integer(
'iterations_per_loop', 500,
'Number of iterations per TPU training loop.')
flags.DEFINE_integer(
'train_batch_size', 256,
'Global (not per-shard) batch size for training')
flags.DEFINE_integer(
'eval_batch_size', 256,
'Global (not per-shard) batch size for evaluation')
flags.DEFINE_float(
'num_epochs', 48.,
    'Number of epochs to use for training.')
flags.DEFINE_float(
'num_epochs_per_eval', 1.,
'Number of training epochs to run between evaluations.')
flags.DEFINE_string(
'mode', 'train_and_eval',
'Mode to run: train, eval, train_and_eval, predict, or export_savedmodel')
flags.DEFINE_integer(
'save_checkpoints_steps', None,
'Interval (in steps) at which the model data '
'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_bool(
'enable_hostcall', True,
'Skip the host_call which is executed every training step. This is'
' generally used for generating training summaries (train loss,'
' learning rate, etc...). When --enable_hostcall=True, there could'
' be a performance drop if host_call function is slow and cannot'
' keep up with the TPU-side computation.')
# Model specific parameters
flags.DEFINE_bool('use_aux_head', True, 'Include aux head or not.')
flags.DEFINE_float(
'aux_scaling', 0.4, 'Scaling factor of aux_head')
flags.DEFINE_float(
'batch_norm_decay', 0.9, 'Batch norm decay.')
flags.DEFINE_float(
'batch_norm_epsilon', 1e-5, 'Batch norm epsilon.')
flags.DEFINE_float(
'dense_dropout_keep_prob', None, 'Dense dropout keep probability.')
flags.DEFINE_float(
'drop_connect_keep_prob', 1.0, 'Drop connect keep probability.')
flags.DEFINE_string(
'drop_connect_version', None, 'Drop connect version.')
flags.DEFINE_string(
'cell_name', 'amoeba_net_d', 'Which network to run.')
flags.DEFINE_integer(
'num_cells', 12, 'Total number of cells.')
flags.DEFINE_integer(
'reduction_size', 256, 'Default cell reduction size.')
flags.DEFINE_integer(
'stem_reduction_size', 32, 'Stem filter size.')
flags.DEFINE_float(
'weight_decay', 4e-05, 'Weight decay for slim model.')
flags.DEFINE_integer(
'num_label_classes', 1001, 'The number of classes that images fit into.')
# Training hyper-parameters
flags.DEFINE_float(
'lr', 0.64, 'Learning rate.')
flags.DEFINE_string(
'optimizer', 'rmsprop',
'Optimizer (one of sgd, rmsprop, momentum)')
flags.DEFINE_float(
'moving_average_decay', 0.9999,
'moving average decay rate')
flags.DEFINE_float(
'lr_decay_value', 0.9,
'Exponential decay rate used in learning rate adjustment')
flags.DEFINE_integer(
'lr_num_epochs_per_decay', 1,
'Exponential decay epochs used in learning rate adjustment')
flags.DEFINE_string(
'lr_decay_method', 'exponential',
'Method of decay: exponential, cosine, constant, stepwise')
flags.DEFINE_float(
'lr_warmup_epochs', 3.0,
'Learning rate increased from zero linearly to lr for the first '
'lr_warmup_epochs.')
flags.DEFINE_float('gradient_clipping_by_global_norm', 0,
'gradient_clipping_by_global_norm')
flags.DEFINE_integer(
'image_size', 299, 'Size of image, assuming image height and width.')
flags.DEFINE_integer(
'num_train_images', 1281167, 'The number of images in the training set.')
flags.DEFINE_integer(
'num_eval_images', 50000, 'The number of images in the evaluation set.')
flags.DEFINE_bool(
'use_bp16', True, 'If True, use bfloat16 for activations')
flags.DEFINE_integer(
'eval_timeout', 60*60*24,
'Maximum seconds between checkpoints before evaluation terminates.')
FLAGS = flags.FLAGS
def build_run_config():
"""Return RunConfig for TPU estimator."""
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size
iterations_per_loop = (eval_steps if FLAGS.mode == 'eval'
else FLAGS.iterations_per_loop)
save_checkpoints_steps = FLAGS.save_checkpoints_steps or iterations_per_loop
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps,
keep_checkpoint_max=None,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=FLAGS.num_shards,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
))
return run_config
def build_tensor_serving_input_receiver_fn(
shape, batch_size=1, dtype=tf.float32,):
"""Returns a input_receiver_fn that can be used during serving.
This expects examples to come through as float tensors, and simply
wraps them as TensorServingInputReceivers.
Arguably, this should live in tf.estimator.export. Testing here first.
Args:
shape: list representing target size of a single example.
batch_size: number of input tensors that will be passed for prediction
dtype: the expected datatype for the input example
Returns:
A function that itself returns a TensorServingInputReceiver.
"""
def serving_input_receiver_fn():
# Prep a placeholder where the input example will be fed in
features = tf.placeholder(
dtype=dtype, shape=[batch_size] + shape, name='input_tensor')
return tf.estimator.export.TensorServingInputReceiver(
features=features, receiver_tensors=features)
return serving_input_receiver_fn
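# Usage sketch (mirrors the export_savedmodel branch of main() below):
#   serving_input_receiver_fn=build_tensor_serving_input_receiver_fn(
#       [hparams.image_size, hparams.image_size, 3],
#       batch_size=hparams.eval_batch_size)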
# TODO(ereal): simplify this.
def override_with_flags(hparams):
"""Overrides parameters with flag values."""
override_flag_names = [
'aux_scaling',
'train_batch_size',
'batch_norm_decay',
'batch_norm_epsilon',
'dense_dropout_keep_prob',
'drop_connect_keep_prob',
'drop_connect_version',
'eval_batch_size',
'gradient_clipping_by_global_norm',
'lr',
'lr_decay_method',
'lr_decay_value',
'lr_num_epochs_per_decay',
'moving_average_decay',
'image_size',
'num_cells',
'reduction_size',
'stem_reduction_size',
'num_epochs',
'num_epochs_per_eval',
'optimizer',
'enable_hostcall',
'use_aux_head',
'use_bp16',
'use_tpu',
'lr_warmup_epochs',
'weight_decay',
'num_shards',
'distributed_group_size',
'num_train_images',
'num_eval_images',
'num_label_classes',
]
for flag_name in override_flag_names:
flag_value = getattr(FLAGS, flag_name, 'INVALID')
if flag_value == 'INVALID':
tf.logging.fatal('Unknown flag %s.' % str(flag_name))
if flag_value is not None:
_set_or_add_hparam(hparams, flag_name, flag_value)
def build_hparams():
"""Build tf.Hparams for training Amoeba Net."""
hparams = model_lib.build_hparams(FLAGS.cell_name)
override_with_flags(hparams)
return hparams
def _terminate_eval():
tf.logging.info('Timeout passed with no new checkpoints ... terminating eval')
return True
def _get_next_checkpoint():
return tf.contrib.training.checkpoints_iterator(
FLAGS.model_dir,
timeout=FLAGS.eval_timeout,
timeout_fn=_terminate_eval)
def _set_or_add_hparam(hparams, name, value):
if getattr(hparams, name, None) is None:
hparams.add_hparam(name, value)
else:
hparams.set_hparam(name, value)
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = tf.train.NewCheckpointReader(
tf.train.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def main(_):
mode = FLAGS.mode
data_dir = FLAGS.data_dir
model_dir = FLAGS.model_dir
hparams = build_hparams()
  estimator_params = {}
train_steps_per_epoch = int(
math.ceil(hparams.num_train_images / float(hparams.train_batch_size)))
eval_steps = hparams.num_eval_images // hparams.eval_batch_size
eval_batch_size = (None if mode == 'train' else
hparams.eval_batch_size)
model = model_lib.AmoebaNetEstimatorModel(hparams, model_dir)
if hparams.use_tpu:
run_config = build_run_config()
image_classifier = tf.contrib.tpu.TPUEstimator(
model_fn=model.model_fn,
use_tpu=True,
config=run_config,
        params=estimator_params,
predict_batch_size=eval_batch_size,
train_batch_size=hparams.train_batch_size,
eval_batch_size=eval_batch_size)
else:
save_checkpoints_steps = (FLAGS.save_checkpoints_steps or
FLAGS.iterations_per_loop)
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps)
image_classifier = tf.estimator.Estimator(
model_fn=model.model_fn,
config=run_config,
        params=estimator_params)
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
imagenet_train = model_lib.InputPipeline(
is_training=True, data_dir=data_dir, hparams=hparams)
imagenet_eval = model_lib.InputPipeline(
is_training=False, data_dir=data_dir, hparams=hparams)
if hparams.moving_average_decay < 1:
eval_hooks = [model_lib.LoadEMAHook(model_dir,
hparams.moving_average_decay)]
else:
eval_hooks = []
if mode == 'eval':
for checkpoint in _get_next_checkpoint():
tf.logging.info('Starting to evaluate.')
try:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
hooks=eval_hooks,
checkpoint_path=checkpoint)
tf.logging.info('Evaluation results: %s' % eval_results)
except tf.errors.NotFoundError:
# skip checkpoint if it gets deleted prior to evaluation
        tf.logging.info('Checkpoint %s no longer exists ... skipping' % checkpoint)
elif mode == 'train_and_eval':
current_step = _load_global_step_from_checkpoint_dir(model_dir)
tf.logging.info('Starting training at step=%d.' % current_step)
train_steps_per_eval = int(
hparams.num_epochs_per_eval * train_steps_per_epoch)
# Final Evaluation if training is finished.
if current_step >= hparams.num_epochs * train_steps_per_epoch:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
while current_step < hparams.num_epochs * train_steps_per_epoch:
image_classifier.train(
input_fn=imagenet_train.input_fn, steps=train_steps_per_eval)
current_step += train_steps_per_eval
tf.logging.info('Starting evaluation at step=%d.' % current_step)
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
elif mode == 'predict':
for checkpoint in _get_next_checkpoint():
tf.logging.info('Starting prediction ...')
time_hook = model_lib.SessionTimingHook()
eval_hooks.append(time_hook)
result_iter = image_classifier.predict(
input_fn=imagenet_eval.input_fn,
hooks=eval_hooks,
checkpoint_path=checkpoint,
yield_single_examples=False)
results = list(itertools.islice(result_iter, eval_steps))
tf.logging.info('Inference speed = {} images per second.'.format(
time_hook.compute_speed(len(results) * eval_batch_size)))
elif mode == 'export_savedmodel':
tf.logging.info('Starting exporting saved model ...')
image_classifier.export_saved_model(
export_dir_base=model_dir + '/export_savedmodel/',
serving_input_receiver_fn=build_tensor_serving_input_receiver_fn(
[hparams.image_size, hparams.image_size, 3],
batch_size=hparams.eval_batch_size),
as_text=True)
else: # default to train mode.
current_step = _load_global_step_from_checkpoint_dir(model_dir)
total_step = int(hparams.num_epochs * train_steps_per_epoch)
if current_step < total_step:
tf.logging.info('Starting training ...')
image_classifier.train(
input_fn=imagenet_train.input_fn,
steps=total_step-current_step)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| apache-2.0 |
grehujt/SmallPythonProjects | ClusteringRelatedPosts/solution.py | 1 | 2849 |
import scipy as sp
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import nltk.stem
stemmer = nltk.stem.SnowballStemmer('english')
class StemmedCountVectorizer(CountVectorizer):
def build_analyzer(self):
        analyzer = super(StemmedCountVectorizer, self).build_analyzer()
return lambda doc: (stemmer.stem(w) for w in analyzer(doc))
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()
return lambda doc: (stemmer.stem(w) for w in analyzer(doc))
# vectorizer = CountVectorizer()
# vectorizer = CountVectorizer(stop_words='english')
# vectorizer = StemmedCountVectorizer(stop_words='english')
vectorizer = StemmedTfidfVectorizer(stop_words='english')
print vectorizer
content = ["How to format my hard disk", " Hard disk format problems "]
X = vectorizer.fit_transform(content)
print vectorizer.get_feature_names()
print X.toarray().T
corpus = [
"This is a toy post about machine learning. Actually, it contains not much interesting stuff.",
"Imaging databases can get huge.",
"Most imaging databases save images permanently.",
"Imaging databases store images.",
"Imaging databases store images. Imaging databases store images. Imaging databases store images."
]
X_train = vectorizer.fit_transform(corpus)
print vectorizer.get_feature_names()
print X_train.shape, X_train.toarray().T
newPost = 'imaging databases'
newVec = vectorizer.transform([newPost])
print newVec
def dist(v1, v2):
# return euclidean distance
return sp.linalg.norm((v1 - v2).toarray())
minI, minDist = 0, 1e10
for i in range(X_train.shape[0]):
d = dist(X_train[i, :], newVec)
print i, d
if d < minDist:
minDist = d
minI = i
print 'most related', minI, minDist
def dist_norm(v1, v2):
v1_normed = v1 / sp.linalg.norm(v1.toarray())
v2_normed = v2 / sp.linalg.norm(v2.toarray())
return sp.linalg.norm((v1_normed - v2_normed).toarray())
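# Note: for unit-length vectors, ||u - v||^2 == 2 * (1 - cos(u, v)), so ranking
# posts by this normalized distance is equivalent to ranking by cosine similarity.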
minI, minDist = 0, 1e10
for i in range(X_train.shape[0]):
d = dist_norm(X_train[i, :], newVec)
print i, d
if d < minDist:
minDist = d
minI = i
print 'most related', minI, minDist
print X_train[1,:]
print
print X_train[3,:]
# stemmer = nltk.stem.SnowballStemmer('english')
print stemmer.stem("image") # imag
print stemmer.stem("images") # imag
print stemmer.stem("imaging") # imag
print stemmer.stem("imagination") # imagin
def tfidf(word, post, corpus):
tf = post.count(word) * 1.0 / len(post)
numPosts = len([p for p in corpus if word in p])
idf = sp.log2(len(corpus) * 1.0 / numPosts)
return tf * idf
a, abb, abc = ['a'], ['a', 'b', 'b'], ['a', 'b', 'c']
corpus = [a, abb, abc]
print tfidf('a', a, corpus)
print tfidf('b', abb, corpus)
print tfidf('c', abc, corpus)
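# Expected values (approximately): 0.0, 0.39 and 0.53 -- 'a' appears in every
# document so its idf (hence tf-idf) is zero, while the rarer 'c' scores highest.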
| mit |
jzt5132/scikit-learn | sklearn/__init__.py | 59 | 3038 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/tree/tests/test_export.py | 75 | 9318 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
| bsd-3-clause |
jzt5132/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 380 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
aspidites/django | django/contrib/gis/geoip2/base.py | 73 | 8630 | import os
import socket
import geoip2.database
from django.conf import settings
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.ipv6 import is_valid_ipv6_address
from .resources import City, Country
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}
class GeoIP2Exception(Exception):
pass
class GeoIP2(object):
# The flags for GeoIP memory caching.
# Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
MODE_AUTO = 0
# Use the C extension with memory map.
MODE_MMAP_EXT = 1
# Read from memory map. Pure Python.
MODE_MMAP = 2
# Read database as standard file. Pure Python.
MODE_FILE = 4
# Load database into memory. Pure Python.
MODE_MEMORY = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initialize the GeoIP object. No parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP datasets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.mmdb) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH setting.
* cache: The cache settings when opening up the GeoIP datasets. May be
an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
`GeoIPOptions` C API settings, respectively. Defaults to 0,
meaning MODE_AUTO.
* country: The name of the GeoIP country data file. Defaults to
'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.
* city: The name of the GeoIP city data file. Defaults to
'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS['GEOIP_PATH']
if not path:
raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try to open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
if os.path.isfile(country_db):
self._country = geoip2.database.Reader(country_db, mode=cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
if os.path.isfile(city_db):
self._city = geoip2.database.Reader(city_db, mode=cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure out
# whether the given database path is for the GeoIP country or city
# databases.
reader = geoip2.database.Reader(path, mode=cache)
db_type = reader.metadata().database_type
if db_type.endswith('City'):
# GeoLite City database detected.
self._city = reader
self._city_file = path
elif db_type.endswith('Country'):
# GeoIP Country database detected.
self._country = reader
self._country_file = path
else:
raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
else:
raise GeoIP2Exception('GeoIP path must be a valid file or directory.')
@property
def _reader(self):
if self._country:
return self._country
else:
return self._city
@property
def _country_or_city(self):
if self._country:
return self._country.country
else:
return self._city.city
def __del__(self):
# Cleanup any GeoIP file handles lying around.
if self._reader:
self._reader.close()
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIP2Exception('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP2 only takes IP addresses.
if not (ipv4_re.match(query) or is_valid_ipv6_address(query)):
query = socket.gethostbyname(query)
return query
def city(self, query):
"""
Return a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
return City(self._city.city(enc_query))
def country_code(self, query):
"Return the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_code']
def country_name(self, query):
"Return the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_name']
def country(self, query):
"""
Return a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
enc_query = self._check_query(query, city_or_country=True)
return Country(self._country_or_city(enc_query))
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Return a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Return a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Return a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def info(self):
"Return information about the GeoIP library and databases in use."
meta = self._reader.metadata()
return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)
@classmethod
def open(cls, full_path, cache):
return GeoIP2(full_path, cache)
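# Illustrative usage sketch (assumes GeoLite2 .mmdb files under GEOIP_PATH;
# returned values depend on the local database contents):
#
#   from django.contrib.gis.geoip2 import GeoIP2
#   g = GeoIP2()
#   g.country('8.8.8.8')   # {'country_code': ..., 'country_name': ...}
#   g.city('8.8.8.8')      # dict of city fields (name, postal code, lat/lon, ...)
#   g.lat_lon('8.8.8.8')   # (latitude, longitude) tuple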
| bsd-3-clause |
qinjian623/dlnotes | tutorials/pytorch/drl/enduro.py | 1 | 5020 | import argparse
import gym
import numpy as np
import cv2
from itertools import count
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
parser = argparse.ArgumentParser(description='PyTorch REINFORCE example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
help='random seed (default: 543)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--log_interval', type=int, default=10, metavar='N',
help='interval between training status logs (default: 10)')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
args = parser.parse_args()
torch.manual_seed(args.seed)
if not torch.cuda.is_available():
args.cuda = False
if torch.cuda.is_available() and args.cuda:
torch.cuda.manual_seed(args.seed)
env = gym.make('Enduro-v0')
env.seed(args.seed)
torch.manual_seed(args.seed)
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
self.conv2 = nn.Conv2d(10, 20, kernel_size=3)
self.conv3 = nn.Conv2d(20, 40, kernel_size=3)
self.conv4 = nn.Conv2d(40, 40, kernel_size=3)
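        # 4200 below = 40 channels * 21 * 5, the spatial size remaining after a
        # 3 x 103 x 40 input passes through conv1+pool, conv2, conv3+pool and conv4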
self.fc1 = nn.Linear(4200, 50)
self.fc2 = nn.Linear(50, 6)
self.saved_actions = []
self.rewards = []
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(self.conv2(x))
x = F.relu(F.max_pool2d(self.conv3(x), 2))
x = F.relu(self.conv4(x))
x = x.view(-1, 4200)
x = F.relu(self.fc1(x))
action_scores = self.fc2(x)
return F.softmax(action_scores)
model = Policy()
if args.cuda:
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=1e-2)
def select_action(state):
state = cv2.resize(state, (40, 103))
state = np.transpose(state, (2, 0, 1))
# print(state.shape)
state = torch.from_numpy(state).float().unsqueeze(0)
    # Normalize pixel values to [0, 1]
state /= 255
if args.cuda:
state = state.cuda()
probs = model(Variable(state))
action = probs.multinomial()
model.saved_actions.append(action)
return action.data
def finish_episode():
R = 0
rewards = []
    # Discounted sum of rewards (the return), accumulated back-to-front
for r in model.rewards[::-1]:
R = r + args.gamma * R
rewards.insert(0, R)
rewards = torch.Tensor(rewards)
    # Normalize returns to zero mean / unit variance for more stable updates
rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
    # Attach each normalized return to its saved action (REINFORCE update)
for action, r in zip(model.saved_actions, rewards):
# print(action)
# print(type(action))
action.reinforce(r)
optimizer.zero_grad()
autograd.backward(model.saved_actions, [None for _ in model.saved_actions])
optimizer.step()
del model.rewards[:]
del model.saved_actions[:]
running_reward = 0
need_reset = True
reward = 0
meaning = env.env.get_action_meanings()
for i_episode in count(1):
if need_reset:
state = env.reset()
need_reset = False
reward = 0
running_reward = 0
for t in range(500): # Don't infinite loop while learning
action = select_action(state)
# print(meaning[action[0, 0]])
# ['NOOP', 'FIRE', 'RIGHT', 'LEFT', 'DOWN',
# 'DOWNRIGHT', 'DOWNLEFT', 'RIGHTFIRE', 'LEFTFIRE']
        # Map the 6 policy outputs onto Enduro's 9 actions: indices 0-3 are used
        # directly, while 4 and 5 are shifted by 3 to RIGHTFIRE / LEFTFIRE.
if action[0, 0] <= 3:
state, new_reward, done, _ = env.step(action[0, 0])
else:
state, new_reward, done, _ = env.step(action[0, 0]+3)
if args.render:
env.render()
# print(env.env.get_action_meanings())
# exit()
# print(reward)
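        # Hand-crafted reward shaping for the block below: penalize termination,
        # zero rewards and drops heavily, and reward any improvement.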
if done:
model.rewards.append(-10000)
elif new_reward == 0.0:
model.rewards.append(-3000)
elif new_reward == reward:
model.rewards.append(-500)
elif new_reward < reward:
model.rewards.append(-2000)
else:
model.rewards.append(1000)
reward = new_reward
running_reward += reward
if done:
print("DONE")
# print(reward)
# input()
need_reset = True
break
print("Break episode.")
# running_reward = running_reward * 0.99 + t * 0.01
finish_episode()
if i_episode % args.log_interval == 0:
print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(
i_episode, t, running_reward))
if running_reward > 195:
print("Solved! Running reward is now {} and "
"the last episode runs to {} time steps!".format(running_reward, t))
break
# exit()
| gpl-3.0 |
jzt5132/scikit-learn | examples/mixture/plot_gmm_selection.py | 247 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
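# Added reminder: for a fitted model, BIC = -2 * log-likelihood
# + n_parameters * log(n_samples); the model with the lowest BIC is kept below.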
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/mixture/tests/test_gmm.py | 199 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError`` exception
when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
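    # Illustrative sketch (added, not used by this test): with weights w, samples x
    # and mean mu, the two docstring formulas correspond in NumPy terms to
    #   C_stable   = sum(w[i] * np.outer(x[i] - mu, x[i] - mu) for i in range(n))
    #   C_shortcut = sum(w[i] * np.outer(x[i], x[i]) for i in range(n)) - np.outer(mu, mu)
    # and the shortcut form can lose positive definiteness to round-off error.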
    # we build a dataset with 2 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/datasets/svmlight_format.py | 113 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
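# Format reminder (added note): a typical svmlight / libsvm line looks like
#   <label> [qid:<int>] <index>:<value> <index>:<value> ... [# comment]
# e.g. "1 qid:3 4:0.5 10:2.0" stores only the non-zero features of one sample.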
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
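# Example usage of load_svmlight_files (added, illustrative only; the file names
# are placeholders):
#   X_train, y_train, X_test, y_test = load_svmlight_files(["train.txt", "test.txt"])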
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
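# Example usage of dump_svmlight_file (added, illustrative only; names are
# placeholders):
#   dump_svmlight_file(X_train, y_train, "train.svmlight", zero_based=True)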
| bsd-3-clause |
jzt5132/scikit-learn | examples/feature_selection/plot_feature_selection.py | 248 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
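# Added note: SelectPercentile(f_classif, percentile=10) ranks each feature by its
# ANOVA F-statistic against the target and keeps only the top 10% of features.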
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
JSilva90/MITWS | Asynchronous_SFS/new_async.py | 1 | 5082 | from __future__ import division
import multiprocessing as mp
import time
import pandas as pd
import numpy as np
import sys
import utils
#from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from ML_test import ML_test
from ML_data import ML_instance
max_search_level = 50
expansions_per_worker = 1
expansions_per_level = 5
min_share_work = 3
score_lock = 0
work_lock = 1
total_locks = 2
debug = True
def starter(work, ml_instance, n_workers):
t = time.time()
manager = mp.Manager() ##manager creates a new process to handle variables, may not be the best option
##setup for paralelization
locks = []
for i in range(total_locks):
locks.append(mp.Lock())
global_info = manager.Namespace() ##manager for variables
global_info.history = {} ##saves the history
global_info.expanded_history = []
global_info.best_scores = {}
global_info.worklist = work
workers = []
for i in range (n_workers-1):
p = mp.Process(target=search, args=(i, locks, ml_instance, global_info)) ##cant pass arguments to evaluator
workers.append(p)
p.start()
search(n_workers-1, locks, ml_instance, global_info)
for w in workers:
w.join()
print "Total time, ", time.time() - t, " tested a total of:", len(global_info.history)
tester = ML_test()
tester.history = global_info.history
tester.save_history(filename="history_async")
df = pd.DataFrame()
df["expanded"] = global_info.expanded_history
df.to_csv("expanded_async.csv", index = False)
def update_score(id, subset, score, lock, global_info):
size = len(subset)
lock.acquire()
aux = global_info.best_scores
if size not in aux:
aux[size] = []
    if len(aux[size]) < 8 or aux[size][-1][0] < score:
aux[size].append((score, subset))
aux[size] = sorted(aux[size], key=lambda tup: tup[0])
aux[size].reverse()
aux[size] = aux[size][:8]
global_info.best_scores = aux
lock.release()
def expand_stage(id, global_info, ml_instance, score_lock, current_size ):
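    # Added comment: grow the best-scoring subsets of the current size by one unused
    # feature each, and publish the not-yet-tested combinations as the next worklist.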
t = time.time()
print id, " nominated to generate work for current_size ", current_size
if current_size == max_search_level:
        print id, " determined it's time to end the search"
aux = []
for i in range(50): ##append 50 subsets greater than max search level so everyone stops
aux.append(range(max_search_level+3))
global_info.worklist = list(aux)
return
score_lock.acquire()
subsets_to_expand = global_info.best_scores[current_size][:expansions_per_level]
work = []
hist = global_info.history
exp_hist = global_info.expanded_history
for s in subsets_to_expand:
subset = s[1]
exp_hist.append(subset)
for i in ml_instance.features:
if i not in subset:
sub = subset + [i]
sub.sort()
aux = [str(x) for x in sub]
aux = ",".join(aux)
if aux not in hist:
work.append(sub) ##add subset for next round
hist[aux] = True ##add subset to the testing
    print id, " generated ", len(work), " new subsets in ", round(time.time() - t, 3), " seconds"
global_info.history = hist
global_info.expanded_history = exp_hist
global_info.worklist = list(work)
score_lock.release()
def search(id, locks, ml_instance, global_info):
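    # Added comment: worker loop; repeatedly pop a subset from the shared worklist,
    # evaluate it and record its score. The worker that finds the worklist empty
    # calls expand_stage() to generate the next level of candidates.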
tasks = 0
task_time = []
start_t = time.time()
current_size = 1
worklist_waste = 0.0
update_waste = 0.0
while (True):
##mutual exclusion lock to get a subset from worklist
aux_t = time.time()
locks[work_lock].acquire()
worklist_waste += (time.time() - aux_t)
if global_info.worklist == []:
            expand_stage(id, global_info, ml_instance, locks[score_lock], current_size) ##holds both locks so scores stay consistent while new work is generated
aux = list(global_info.worklist)
subset = aux[0]
del(aux[0])
global_info.worklist = aux
locks[work_lock].release()
        if tasks % 4 == 0: ##periodically print progress
print id, " processed ", tasks, " remaining work ", len(aux)
if len(subset) > max_search_level: ##search ends
break
current_size = len(subset)
tasks += 1
t = time.time()
score = ml_instance.rf_evaluator(subset)
#score = ml_instance.svm_evaluator(subset)
task_time.append(round(time.time()-t, 2))
t = time.time()
update_score(id, subset, score, locks[score_lock], global_info)
update_waste += (time.time() -t)
print id, " ending processing ", tasks, " in ", round(time.time()-start_t,0), " seconds"
print id, " wasted in worklist access ", round(worklist_waste,2), " updating ", round(update_waste,2), " average task time", round(sum(task_time) / len(task_time),2)
| gpl-3.0 |
glouppe/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 150 | 2951 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
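# Added note: both estimators return a shrunk covariance of the form
#   (1 - shrinkage) * empirical_cov + shrinkage * (trace(empirical_cov) / n_features) * identity
# and differ only in how the shrinkage coefficient is chosen.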
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='darkorange', lw=2)
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='darkorange', lw=2)
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
glouppe/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 22 | 45330 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should rise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
    # Test radius-based classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
    # Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
    # Fits nearest-neighbor classifiers and a regressor on the iris data and
    # checks that the resubstitution predictions are (nearly) perfect.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
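    # (illustrative note, not in the original test: unsigned 8-bit arithmetic
    #  wraps modulo 256, e.g. np.uint8(3) - np.uint8(250) yields 9 instead of
    #  -247, so intermediate distance computations can silently go wrong)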
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
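    # For reference, the pairwise distances behind the hard-coded expectations
    # below: d([0, 1], [1.01, 1.]) = 1.01, d([1.01, 1.], [2, 0]) =
    # sqrt(0.99**2 + 1) ~= 1.40716026 and d([0, 1], [2, 0]) = sqrt(5)
    # ~= 2.23606798.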
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity', include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity', include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity', include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        if (issubclass(cls, neighbors.KNeighborsClassifier) or
                issubclass(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
automl/auto-sklearn | autosklearn/pipeline/components/data_preprocessing/rescaling/power_transformer.py | 1 | 1715 | from typing import Dict, Optional, Tuple, Union
import numpy as np
from autosklearn.pipeline.base import DATASET_PROPERTIES_TYPE
from autosklearn.pipeline.components.base import AutoSklearnPreprocessingAlgorithm
from autosklearn.pipeline.components.data_preprocessing.rescaling.abstract_rescaling import ( # noqa: E501
Rescaling,
)
from autosklearn.pipeline.constants import DENSE, INPUT, UNSIGNED_DATA
class PowerTransformerComponent(Rescaling, AutoSklearnPreprocessingAlgorithm):
def __init__(
self,
random_state: Optional[Union[int, np.random.RandomState]] = None,
) -> None:
from sklearn.preprocessing import PowerTransformer
self.preprocessor = PowerTransformer(copy=False)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
"shortname": "PowerTransformer",
"name": "PowerTransformer",
"handles_missing_values": False,
"handles_nominal_values": False,
"handles_numerical_features": True,
"prefers_data_scaled": False,
"prefers_data_normalized": False,
"handles_regression": True,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": True,
"is_deterministic": True,
            # TODO find out if this is right!
"handles_sparse": False,
"handles_dense": True,
"input": (DENSE, UNSIGNED_DATA),
"output": (INPUT,),
"preferred_dtype": None,
}
| bsd-3-clause |
fx2003/tensorflow-study | TensorFlow实战/models/cognitive_mapping_and_planning/scripts/script_preprocess_annoations_S3DIS.py | 14 | 7024 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import glob
import numpy as np
import logging
import cPickle
from datasets import nav_env
from datasets import factory
from src import utils
from src import map_utils as mu
logging.basicConfig(level=logging.INFO)
DATA_DIR = 'data/stanford_building_parser_dataset_raw/'
mkdir_if_missing = utils.mkdir_if_missing
save_variables = utils.save_variables
def _get_semantic_maps(building_name, transform, map_, flip, cats):
rooms = get_room_in_building(building_name)
maps = []
for cat in cats:
maps.append(np.zeros((map_.size[1], map_.size[0])))
for r in rooms:
room = load_room(building_name, r, category_list=cats)
classes = room['class_id']
for i, cat in enumerate(cats):
c_ind = cats.index(cat)
ind = [_ for _, c in enumerate(classes) if c == c_ind]
if len(ind) > 0:
vs = [room['vertexs'][x]*1 for x in ind]
vs = np.concatenate(vs, axis=0)
if transform:
vs = np.array([vs[:,1], vs[:,0], vs[:,2]]).T
vs[:,0] = -vs[:,0]
vs[:,1] += 4.20
vs[:,0] += 6.20
vs = vs*100.
if flip:
vs[:,1] = -vs[:,1]
maps[i] = maps[i] + \
mu._project_to_map(map_, vs, ignore_points_outside_map=True)
return maps
def _map_building_name(building_name):
b = int(building_name.split('_')[0][4])
out_name = 'Area_{:d}'.format(b)
if b == 5:
if int(building_name.split('_')[0][5]) == 1:
transform = True
else:
transform = False
else:
transform = False
return out_name, transform
def get_categories():
cats = ['beam', 'board', 'bookcase', 'ceiling', 'chair', 'clutter', 'column',
'door', 'floor', 'sofa', 'table', 'wall', 'window']
return cats
def _write_map_files(b_in, b_out, transform):
cats = get_categories()
env = utils.Foo(padding=10, resolution=5, num_point_threshold=2,
valid_min=-10, valid_max=200, n_samples_per_face=200)
robot = utils.Foo(radius=15, base=10, height=140, sensor_height=120,
camera_elevation_degree=-15)
building_loader = factory.get_dataset('sbpd')
for flip in [False, True]:
b = nav_env.Building(b_out, robot, env, flip=flip,
building_loader=building_loader)
logging.info("building_in: %s, building_out: %s, transform: %d", b_in,
b_out, transform)
maps = _get_semantic_maps(b_in, transform, b.map, flip, cats)
maps = np.transpose(np.array(maps), axes=[1,2,0])
# Load file from the cache.
file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl'
file_name = file_name.format(b.building_name, b.map.size[0], b.map.size[1],
b.map.origin[0], b.map.origin[1],
b.map.resolution, flip)
out_file = os.path.join(DATA_DIR, 'processing', 'class-maps', file_name)
logging.info('Writing semantic maps to %s.', out_file)
save_variables(out_file, [maps, cats], ['maps', 'cats'], overwrite=True)
def _transform_area5b(room_dimension):
for a in room_dimension.keys():
r = room_dimension[a]*1
r[[0,1,3,4]] = r[[1,0,4,3]]
r[[0,3]] = -r[[3,0]]
r[[1,4]] += 4.20
r[[0,3]] += 6.20
room_dimension[a] = r
return room_dimension
def collect_room(building_name, room_name):
room_dir = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2', building_name,
room_name, 'Annotations')
files = glob.glob1(room_dir, '*.txt')
files = sorted(files, key=lambda s: s.lower())
vertexs = []; colors = [];
for f in files:
file_name = os.path.join(room_dir, f)
logging.info(' %s', file_name)
a = np.loadtxt(file_name)
vertex = a[:,:3]*1.
color = a[:,3:]*1
color = color.astype(np.uint8)
vertexs.append(vertex)
colors.append(color)
files = [f.split('.')[0] for f in files]
out = {'vertexs': vertexs, 'colors': colors, 'names': files}
return out
def load_room(building_name, room_name, category_list=None):
room = collect_room(building_name, room_name)
room['building_name'] = building_name
room['room_name'] = room_name
instance_id = range(len(room['names']))
room['instance_id'] = instance_id
if category_list is not None:
name = [r.split('_')[0] for r in room['names']]
class_id = []
for n in name:
if n in category_list:
class_id.append(category_list.index(n))
else:
class_id.append(len(category_list))
room['class_id'] = class_id
room['category_list'] = category_list
return room
def get_room_in_building(building_name):
building_dir = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2', building_name)
rn = os.listdir(building_dir)
rn = [x for x in rn if os.path.isdir(os.path.join(building_dir, x))]
rn = sorted(rn, key=lambda s: s.lower())
return rn
def write_room_dimensions(b_in, b_out, transform):
rooms = get_room_in_building(b_in)
room_dimension = {}
for r in rooms:
room = load_room(b_in, r, category_list=None)
vertex = np.concatenate(room['vertexs'], axis=0)
room_dimension[r] = np.concatenate((np.min(vertex, axis=0), np.max(vertex, axis=0)), axis=0)
if transform == 1:
room_dimension = _transform_area5b(room_dimension)
out_file = os.path.join(DATA_DIR, 'processing', 'room-dimension', b_out+'.pkl')
save_variables(out_file, [room_dimension], ['room_dimension'], overwrite=True)
def write_room_dimensions_all(I):
mkdir_if_missing(os.path.join(DATA_DIR, 'processing', 'room-dimension'))
bs_in = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_5', 'Area_6']
bs_out = ['area1', 'area2', 'area3', 'area4', 'area5a', 'area5b', 'area6']
transforms = [0, 0, 0, 0, 0, 1, 0]
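  # Area_5 is listed twice on purpose: it is split into area5a and area5b,
  # and only the 5b half gets the coordinate transform (see
  # _map_building_name and _transform_area5b above).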
for i in I:
b_in = bs_in[i]
b_out = bs_out[i]
t = transforms[i]
write_room_dimensions(b_in, b_out, t)
def write_class_maps_all(I):
mkdir_if_missing(os.path.join(DATA_DIR, 'processing', 'class-maps'))
bs_in = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_5', 'Area_6']
bs_out = ['area1', 'area2', 'area3', 'area4', 'area5a', 'area5b', 'area6']
transforms = [0, 0, 0, 0, 0, 1, 0]
for i in I:
b_in = bs_in[i]
b_out = bs_out[i]
t = transforms[i]
_write_map_files(b_in, b_out, t)
if __name__ == '__main__':
write_room_dimensions_all([0, 2, 3, 4, 5, 6])
write_class_maps_all([0, 2, 3, 4, 5, 6])
| mit |
switowski/invenio | invenio/modules/indexer/tokenizers/BibIndexDOITokenizer.py | 12 | 2004 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.modules.indexer.tokenizers.BibIndexFilteringTokenizer import BibIndexFilteringTokenizer
class BibIndexDOITokenizer(BibIndexFilteringTokenizer):
"""
Filtering tokenizer which tokenizes DOI tag (0247_a)
only if "0247_2" tag is present and its value equals "DOI"
and 909C4a tag without any constraints.
"""
def __init__(self, stemming_language=None, remove_stopwords=False, remove_html_markup=False, remove_latex_markup=False):
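        # Each rule is a (tag, filter subfield code, required value) triple:
        # 0247_a is indexed only when subfield 2 equals "DOI", while 909C4a
        # is indexed unconditionally (empty filter).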
self.rules = (('0247_a', '2', 'DOI'), ('909C4a', '', ''))
def get_tokenizing_function(self, wordtable_type):
"""Returns proper tokenizing function"""
return self.tokenize
def tokenize_via_recjson(self, recID):
"""
Nonmarc version of tokenize function for DOI.
Note: with nonmarc we don't need to filter anymore.
We just need to take value from record because we
use bibfield here.
"""
rec = get_record(recID)
values = rec.get('doi', [])
return values
def get_nonmarc_tokenizing_function(self, table_type):
"""
Returns proper tokenizing function for non-marc records.
"""
return self.tokenize_via_recjson
| gpl-2.0 |
MatthieuCourbariaux/BinaryConnect | svhn.py | 3 | 10981 | # Copyright 2015 Matthieu Courbariaux
# This file is part of BinaryConnect.
# BinaryConnect is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# BinaryConnect is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with BinaryConnect. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
import os
import time
import numpy as np
np.random.seed(1234) # for reproducibility?
# specifying the gpu to use
# import theano.sandbox.cuda
# theano.sandbox.cuda.use('gpu1')
import theano
import theano.tensor as T
import lasagne
import cPickle as pickle
import gzip
import batch_norm
import binary_connect
from pylearn2.datasets.svhn import SVHN
from pylearn2.utils import serial
from collections import OrderedDict
if __name__ == "__main__":
# Batch Normalization parameters
batch_size = 50
print("batch_size = "+str(batch_size))
# alpha is the exponential moving average factor
alpha = .1
print("alpha = "+str(alpha))
epsilon = 1e-4
print("epsilon = "+str(epsilon))
# Training parameters
num_epochs = 200
print("num_epochs = "+str(num_epochs))
# BinaryConnect
binary = True
print("binary = "+str(binary))
stochastic = True
print("stochastic = "+str(stochastic))
# (-H,+H) are the two binary values
# H = "Glorot"
H = 1.
print("H = "+str(H))
# W_LR_scale = 1.
W_LR_scale = "Glorot" # "Glorot" means we are using the coefficients from Glorot's paper
print("W_LR_scale = "+str(W_LR_scale))
# Decaying LR
LR_start = 0.01
print("LR_start = "+str(LR_start))
LR_fin = 0.000003
print("LR_fin = "+str(LR_fin))
LR_decay = (LR_fin/LR_start)**(1./num_epochs)
print("LR_decay = "+str(LR_decay))
    # BTW, LR decay might be good for the BN moving average...
print('Loading SVHN dataset')
train_set = SVHN(
which_set= 'splitted_train',
path= "${SVHN_LOCAL_PATH}",
axes= ['b', 'c', 0, 1])
valid_set = SVHN(
which_set= 'valid',
path= "${SVHN_LOCAL_PATH}",
axes= ['b', 'c', 0, 1])
test_set = SVHN(
which_set= 'test',
path= "${SVHN_LOCAL_PATH}",
axes= ['b', 'c', 0, 1])
# bc01 format
# print train_set.X.shape
train_set.X = np.reshape(train_set.X,(-1,3,32,32))
valid_set.X = np.reshape(valid_set.X,(-1,3,32,32))
test_set.X = np.reshape(test_set.X,(-1,3,32,32))
# for hinge loss (targets are already onehot)
train_set.y = np.subtract(np.multiply(2,train_set.y),1.)
valid_set.y = np.subtract(np.multiply(2,valid_set.y),1.)
test_set.y = np.subtract(np.multiply(2,test_set.y),1.)
print('Building the CNN...')
# Prepare Theano variables for inputs and targets
input = T.tensor4('inputs')
target = T.matrix('targets')
LR = T.scalar('LR', dtype=theano.config.floatX)
cnn = lasagne.layers.InputLayer(
shape=(None, 3, 32, 32),
input_var=input)
# 64C3-64C3-P2
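    # (shorthand: two 3x3 convolutions with 64 feature maps each, followed by
    #  2x2 max pooling; the later "128C3" and "256C3" blocks follow the same
    #  naming pattern)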
cnn = binary_connect.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=64,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = batch_norm.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha,
nonlinearity=lasagne.nonlinearities.rectify)
cnn = binary_connect.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=64,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = batch_norm.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha,
nonlinearity=lasagne.nonlinearities.rectify)
# 128C3-128C3-P2
cnn = binary_connect.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=128,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = batch_norm.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha,
nonlinearity=lasagne.nonlinearities.rectify)
cnn = binary_connect.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=128,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = batch_norm.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha,
nonlinearity=lasagne.nonlinearities.rectify)
# 256C3-256C3-P2
cnn = binary_connect.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=256,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = batch_norm.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha,
nonlinearity=lasagne.nonlinearities.rectify)
cnn = binary_connect.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=256,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = batch_norm.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha,
nonlinearity=lasagne.nonlinearities.rectify)
# print(cnn.output_shape)
# 1024FP-1024FP-10FP
cnn = binary_connect.DenseLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
nonlinearity=lasagne.nonlinearities.identity,
num_units=1024)
cnn = batch_norm.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha,
nonlinearity=lasagne.nonlinearities.rectify)
cnn = binary_connect.DenseLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
nonlinearity=lasagne.nonlinearities.identity,
num_units=1024)
cnn = batch_norm.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha,
nonlinearity=lasagne.nonlinearities.rectify)
cnn = binary_connect.DenseLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
nonlinearity=lasagne.nonlinearities.identity,
num_units=10)
cnn = batch_norm.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha,
nonlinearity=lasagne.nonlinearities.identity)
train_output = lasagne.layers.get_output(cnn, deterministic=False)
# squared hinge loss
loss = T.mean(T.sqr(T.maximum(0.,1.-target*train_output)))
if binary:
# W updates
W = lasagne.layers.get_all_params(cnn, binary=True)
W_grads = binary_connect.compute_grads(loss,cnn)
updates = lasagne.updates.adam(loss_or_grads=W_grads, params=W, learning_rate=LR)
updates = binary_connect.clipping_scaling(updates,cnn)
# other parameters updates
params = lasagne.layers.get_all_params(cnn, trainable=True, binary=False)
updates = OrderedDict(updates.items() + lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR).items())
else:
params = lasagne.layers.get_all_params(cnn, trainable=True)
updates = lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR)
test_output = lasagne.layers.get_output(cnn, deterministic=True)
test_loss = T.mean(T.sqr(T.maximum(0.,1.-target*test_output)))
test_err = T.mean(T.neq(T.argmax(test_output, axis=1), T.argmax(target, axis=1)),dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
# and returning the corresponding training loss:
train_fn = theano.function([input, target, LR], loss, updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input, target], [test_loss, test_err])
print('Training...')
binary_connect.train(
train_fn,val_fn,
batch_size,
LR_start,LR_decay,
num_epochs,
train_set.X,train_set.y,
valid_set.X,valid_set.y,
test_set.X,test_set.y)
# print("display histogram")
# W = lasagne.layers.get_all_layers(mlp)[2].W.get_value()
# print(W.shape)
# histogram = np.histogram(W,bins=1000,range=(-1.1,1.1))
# np.savetxt(str(dropout_hidden)+str(binary)+str(stochastic)+str(H)+"_hist0.csv", histogram[0], delimiter=",")
# np.savetxt(str(dropout_hidden)+str(binary)+str(stochastic)+str(H)+"_hist1.csv", histogram[1], delimiter=",")
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', lasagne.layers.get_all_param_values(network)) | gpl-2.0 |
sgenoud/scikit-learn | examples/linear_model/plot_sgd_iris.py | 3 | 2170 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
pl.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
pl.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=pl.cm.Paired)
pl.title("Decision surface of multi-class SGD")
pl.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = pl.xlim()
ymin, ymax = pl.ylim()
coef = clf.coef_
intercept = clf.intercept_
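# For class c the one-vs-all decision boundary is the set of points where
# coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] == 0; the helper below
# simply solves that equation for x1 given x0 so the boundary can be drawn
# as a dashed line.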
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
pl.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
pl.legend()
pl.show()
| bsd-3-clause |
glouppe/scikit-learn | sklearn/cluster/__init__.py | 359 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
luo66/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 296 | 1770 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
elkingtonmcb/h2o-2 | bench/BMscripts/glmBench.py | 11 | 9671 | #GLM bench
import os, sys, time, csv, socket, string
sys.path.append('../py/')
sys.path.extend(['.','..'])
import h2o_cmd, h2o, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_rf, h2o_jobs
csv_header = ('h2o_build','nMachines','nJVMs','Xmx/JVM','dataset','nTrainRows','nTestRows','nCols','trainParseWallTime','nfolds','family','glmBuildTime','testParseWallTime','scoreTime','AUC','AIC','error')
files = {'Airlines' : {'train': ('AirlinesTrain1x', 'AirlinesTrain10x', 'AirlinesTrain100x'), 'test' : 'AirlinesTest'},
'AllBedrooms' : {'train': ('AllBedroomsTrain1x', 'AllBedroomsTrain10x', 'AllBedroomsTrain100x'), 'test' : 'AllBedroomsTest'},
}
build = ""
debug = False
json = ""
def doGLM(f, folderPath, family, link, lambda_, alpha, nfolds, y, x, testFilehex, row):
debug = False
bench = "bench"
if debug:
print "DOING GLM DEBUG"
bench = "bench/debug"
date = '-'.join([str(z) for z in list(time.localtime())][0:3])
overallWallStart = time.time()
pre = ""
if debug: pre = "DEBUG"
glmbenchcsv = 'benchmarks/'+build+'/'+pre+'glmbench.csv'
if not os.path.exists(glmbenchcsv):
output = open(glmbenchcsv,'w')
output.write(','.join(csv_header)+'\n')
else:
output = open(glmbenchcsv,'a')
csvWrt = csv.DictWriter(output, fieldnames=csv_header, restval=None,
dialect='excel', extrasaction='ignore',delimiter=',')
try:
java_heap_GB = h2o.nodes[0].java_heap_GB
importFolderPath = bench + "/" + folderPath
if (f in ['AirlinesTrain1x','AllBedroomsTrain1x', 'AllBedroomsTrain10x', 'AllBedroomsTrain100x']):
csvPathname = importFolderPath + "/" + f + '.csv'
else:
csvPathname = importFolderPath + "/" + f + "/*linked*"
hex_key = f + '.hex'
hK = folderPath + "Header.csv"
headerPathname = importFolderPath + "/" + hK
h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
headerKey = h2i.find_key(hK)
trainParseWallStart = time.time()
parseResult = h2i.import_parse(bucket = 'home-0xdiag-datasets',
path = csvPathname,
schema = 'local',
hex_key = hex_key,
header = 1,
header_from_file = headerKey,
separator = 44,
timeoutSecs = 7200,
retryDelaySecs = 5,
pollTimeoutSecs = 7200,
doSummary = False
)
parseWallTime = time.time() - trainParseWallStart
print "Parsing training file took ", parseWallTime ," seconds."
inspect_train = h2o.nodes[0].inspect(parseResult['destination_key'], timeoutSecs=7200)
inspect_test = h2o.nodes[0].inspect(testFilehex, timeoutSecs=7200)
        nMachines = 1 if len(h2o_hosts.hosts) == 0 else len(h2o_hosts.hosts)
row.update( {'h2o_build' : build,
'nMachines' : nMachines,
'nJVMs' : len(h2o.nodes),
'Xmx/JVM' : java_heap_GB,
'dataset' : f,
'nTrainRows' : inspect_train['num_rows'],
'nTestRows' : inspect_test['num_rows'],
'nCols' : inspect_train['num_cols'],
'trainParseWallTime' : parseWallTime,
'nfolds' : nfolds,
'family' : family,
})
params = {'y' : y,
'x' : x,
'family' : family,
'link' : link,
'lambda' : lambda_,
'alpha' : alpha,
'n_folds' : nfolds,
'case_mode' : "n/a",
'destination_key' : "GLM("+f+")",
'expert_settings' : 0,
}
kwargs = params.copy()
glmStart = time.time()
glm = h2o_cmd.runGLM(parseResult = parseResult,
timeoutSecs = 7200,
**kwargs)
glmTime = time.time() - glmStart
row.update( {'glmBuildTime' : glmTime,
#'AverageErrorOver10Folds' : glm['GLMModel']['validations'][0]['err'],
})
glmScoreStart = time.time()
glmScore = h2o_cmd.runGLMScore(key = testFilehex,
model_key = params['destination_key'],
timeoutSecs = 1800)
scoreTime = time.time() - glmScoreStart
cmd = 'bash startloggers.sh ' + json + ' stop_'
os.system(cmd)
if family == "binomial":
row.update( {'scoreTime' : scoreTime,
'AUC' : glmScore['validation']['auc'],
'AIC' : glmScore['validation']['aic'],
'error' : glmScore['validation']['err'],
})
else:
row.update( {'scoreTime' : scoreTime,
'AIC' : glmScore['validation']['aic'],
'AUC' : 'NA',
'error' : glmScore['validation']['err'],
})
csvWrt.writerow(row)
finally:
output.close()
if __name__ == '__main__':
dat = sys.argv.pop(-1)
debug = sys.argv.pop(-1)
build = sys.argv.pop(-1)
json = sys.argv[-1].split('/')[-1]
h2o.parse_our_args()
h2o_hosts.build_cloud_with_hosts()
fp = 'Airlines' if 'Air' in dat else 'AllBedrooms'
if dat == 'Air1x' : fs = files['Airlines']['train'][0]
if dat == 'Air10x' : fs = files['Airlines']['train'][1]
if dat == 'Air100x' : fs = files['Airlines']['train'][2]
if dat == 'AllB1x' : fs = files['AllBedrooms']['train'][0]
if dat == 'AllB10x' : fs = files['AllBedrooms']['train'][1]
if dat == 'AllB100x' : fs = files['AllBedrooms']['train'][2]
bench = "bench"
debug = False
if debug:
bench = "bench/debug"
if fp == 'Airlines':
airlinesTestParseStart = time.time()
hK = "AirlinesHeader.csv"
headerPathname = bench+"/Airlines" + "/" + hK
h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
headerKey = h2i.find_key(hK)
testFile = h2i.import_parse(bucket='home-0xdiag-datasets', path=bench +'/Airlines/AirlinesTest.csv', schema='local', hex_key="atest.hex", header=1, header_from_file=headerKey, separator=44, doSummary=False,
timeoutSecs=7200,retryDelaySecs=5, pollTimeoutSecs=7200)
elapsedAirlinesTestParse = time.time() - airlinesTestParseStart
row = {'testParseWallTime' : elapsedAirlinesTestParse}
x = "Year,Month,DayofMonth,DayOfWeek,DepTime,ArrTime,UniqueCarrier,Origin,Dest,Distance"
doGLM(fs, 'Airlines', 'binomial', 'logit', 1E-5, 0.5, 10, 'IsDepDelayed', x, testFile['destination_key'], row)
if fp == 'AllBedrooms':
allBedroomsTestParseStart = time.time()
x = 'areaname,state,metro,count1,count2,count3,count4,count5,count6,count7,count8,count9,count10,count11,count12,count13,count14,count15,count16,count17,count18,count19,count20,count21,count22,count23,count24,count25,count26,count27,count28,count29,count30,count31,count32,count33,count34,count35,count36,count37,count38,count39,count40,count41,count42,count43,count44,count45,count46,count47,count48,count49,count50,count51,count52,count53,count54,count55,count56,count57,count58,count59,count60,count61,count62,count63,count64,count65,count66,count67,count68,count69,count70,count71,count72,count73,count74,count75,count76,count77,count78,count79,count80,count81,count82,count83,count84,count85,count86,count87,count88,count89,count90,count91,count92,count93,count94,count95,count96,count97,count98,count99'
hK = "AllBedroomsHeader.csv"
headerPathname = bench+"/AllBedrooms" + "/" + hK
h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
headerKey = h2i.find_key(hK)
testFile = h2i.import_parse(bucket='home-0xdiag-datasets', path=bench+'/AllBedrooms/AllBedroomsTest.csv', schema='local', hex_key="allBtest.hex", header=1, header_from_file=headerKey, separator=44, doSummary=False,
timeoutSecs=7200,retryDelaySecs=5, pollTimeoutSecs=7200)
elapsedAllBedroomsTestParse = time.time() - allBedroomsTestParseStart
row = {'testParseWallTime' : elapsedAllBedroomsTestParse}
doGLM(fs, 'AllBedrooms', 'gaussian', 'identity', 1E-4, 0.75, 10, 'medrent',x, testFile['destination_key'],row)
h2o.tear_down_cloud()
| apache-2.0 |
codeworldprodigy/lab2 | lib/jinja2/jinja2/visitor.py | 1402 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
    By default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
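# Minimal usage sketch (not part of the original module): a visitor subclass
# relying on the ``'visit_' + class name`` dispatch described above.  ``Name``
# is an existing node type in ``jinja2.nodes``.
class _ExampleNameCounter(NodeVisitor):
    """Counts the ``Name`` nodes encountered while walking a template AST."""
    def __init__(self):
        self.count = 0
    def visit_Name(self, node, *args, **kwargs):
        # dispatched automatically for every jinja2.nodes.Name node
        self.count += 1
        self.generic_visit(node, *args, **kwargs)
# e.g. ``counter = _ExampleNameCounter(); counter.visit(parsed_template)``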
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
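# Minimal usage sketch (not part of the original module): a transformer that
# drops every ``Output`` node, using the rule documented above that returning
# None removes the visited node from its parent.
class _ExampleOutputStripper(NodeTransformer):
    def visit_Output(self, node, *args, **kwargs):
        # returning None deletes the Output node from the surrounding tree
        return None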
| apache-2.0 |
davidhstocker/Graphyne | Smoketest.py | 1 | 217442 | #!/usr/bin/env python3
"""
Smoketest.py: Regression testing utility for Graphyne. Multiprocessing wrapper for Smoketest, allowing multiple simultaneous tests against different persistence types.
"""
from tkinter.test.runtktests import this_dir_path
from graphyne.DatabaseDrivers.DriverTermplate import linkTypes
__author__ = 'David Stocker'
__copyright__ = 'Copyright 2016, David Stocker'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'David Stocker'
__email__ = 'mrdave991@gmail.com'
__status__ = 'Production'
from xml.dom import minidom
from time import ctime
from os.path import expanduser
import copy
import os
import codecs
import time
import decimal
import queue
import sys
import argparse
#from os.path import expanduser
import graphyne.Graph as Graph
import graphyne.Fileutils as Fileutils
import graphyne.Exceptions as Exceptions
responseQueue = queue.Queue()
entityList = []
api = None
global testImplicit
testImplicit = True
#Globals
#graphDir = expanduser("~")
#graphDir = os.getcwd()
graphDir = os.path.dirname(os.path.abspath(__file__))
testDirPath = os.path.join("Config", "Test")
configDirPath = os.path.join("utils", "Config")
resultFile = None
moduleName = 'Smoketest'
logType = Graph.logTypes.CONTENT
logLevel = Graph.logLevel
class DBError(ValueError):
pass
def testMetaMemeProperty():
method = moduleName + '.' + 'testMetaMemeProperty'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, "MetaMeme_Properties.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
readLoc.close
n = 0
for eachReadLine in allLines:
n = n+1
stringArray = str.split(eachReadLine)
testArgumentMap = {stringArray[1] : stringArray[2]}
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
        # columns after 2 can be repeated in key/value pairs; columns 3/4,
        # 5/6 and so on up to 11/12 hold further argument/value pairs
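        # Column 0 holds the metameme path and column 13 the expected result;
        # unused key/value slots in the .atest line are padded with 'XXX',
        # which is stripped out of testArgumentMap further below.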
try: testArgumentMap[str(stringArray[3])] = str(stringArray[4])
except: pass
try: testArgumentMap[str(stringArray[5])] = str(stringArray[6])
except: pass
try: testArgumentMap[str(stringArray[7])] = str(stringArray[8])
except: pass
try: testArgumentMap[str(stringArray[9])] = str(stringArray[10])
except: pass
try: testArgumentMap[str(stringArray[11])] = str(stringArray[12])
except: pass
removeMe = 'XXX'
try:
del testArgumentMap[removeMe]
except: pass
allTrue = True
errata = []
try:
mmToTest = Graph.templateRepository.templates[stringArray[0]]
props = mmToTest.properties
Graph.logQ.put( [logType , logLevel.DEBUG , method , "testing metameme %s, props = %s" %(mmToTest.path.fullTemplatePath, props)])
for testKey in testArgumentMap.keys():
testType = testArgumentMap[testKey]
Graph.logQ.put( [logType , logLevel.DEBUG , method , "testKey = %s, testType = %s" %(testKey, testType)])
#ToDo: Fix Me. We should not be using temp properties anymore
try:
prop = mmToTest.getProperty(testKey)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "prop = %s" %(prop)])
splitName = testKey.rpartition('.')
if (prop is not None) and (prop.name.find(splitName[2]) < 0):
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s and test property %s don't match" %(prop.name, testKey)])
allTrue = False
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s and test property %s match" %(prop.name, testKey)])
if prop is not None:
if prop.propertyType != testType:
Graph.logQ.put( [logType , logLevel.WARNING , method , "property %s type %s and testType %s do not match" %(prop.name, prop.propertyType, testType)])
allTrue = False
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s type %s and testType %s match" %(prop.name, prop.propertyType, testType)])
else:
Graph.logQ.put( [logType , logLevel.WARNING , method , "property %s is invalid" %(testKey)])
except Exception as e:
Graph.logQ.put( [logType , logLevel.ERROR , method , "Error pulling testkey %s from %s's properties. Traceback = %s" %(testKey, mmToTest.path.fullTemplatePath, e)])
allTrue = False
if allTrue == False:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "testkey %s has no match" %(testKey)])
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(allTrue)
expectedResult = stringArray[13]
results = [n, testcase, allTrueResult, expectedResult, copy.deepcopy(errata)]
resultSet.append(results)
del errata
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testMetaMemeSingleton():
method = moduleName + '.' + 'testMetaMemeSingleton'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, "MetaMeme_Singleton.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
expectedTestResult = False
if stringArray[1] == 'TRUE':
expectedTestResult = True
Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is expected to be a singleton == %s' %(stringArray[0], expectedTestResult)])
testResult = False
try:
mmToTest = Graph.templateRepository.resolveTemplateAbsolutely(stringArray[0])
if mmToTest.isSingleton == True:
Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is a singleton' %(stringArray[0])])
testResult = True
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is not a singleton' %(stringArray[0])])
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[1]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testMetaMemeSwitch():
method = moduleName + '.' + 'testMetaMemeSwitch'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, "MetaMeme_Switch.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
expectedTestResult = False
if stringArray[1] == 'TRUE':
expectedTestResult = True
Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is expected to be a singleton == %s' %(stringArray[0], expectedTestResult)])
testResult = False
try:
mmToTest = Graph.templateRepository.resolveTemplateAbsolutely(stringArray[0])
if mmToTest.isSwitch == True:
Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is a switch' %(stringArray[0])])
testResult = True
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is not a switch' %(stringArray[0])])
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[1]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testMetaMemeEnhancements():
method = moduleName + '.' + 'testMetaMemeEnhancements'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, "MetaMeme_Enhances.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
testArgumentList = []
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
#columns 1&2 may contain data
if stringArray[1] != 'XXX':
testArgumentList.append(stringArray[1])
if stringArray[2] != 'XXX':
testArgumentList.append(stringArray[2])
allTrue = False
try:
mmToTest = Graph.templateRepository.resolveTemplateAbsolutely(stringArray[0])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "testing metameme %s, enhancements = %s" %(mmToTest.path.fullTemplatePath, mmToTest.enhances)])
for testArgument in testArgumentList:
#Hack alert! If we have no enhancements in the testcase, the result should be false.
# Hence we initialize to false, but if we actually have test cases, we re-initialize to True
allTrue = True
for testArgument in testArgumentList:
amIextended = Graph.templateRepository.resolveTemplate(mmToTest.path, testArgument)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "checking to see if %s, enhances %s" %(mmToTest.path.fullTemplatePath, amIextended.path.fullTemplatePath)])
#iterate over the enhancement list and see if we have a match
testResult = False
for enhancement in mmToTest.enhances:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "testing enhancement %s against %s" %(enhancement, amIextended.path.fullTemplatePath)])
try:
enhancedMetaMeme = Graph.templateRepository.resolveTemplate(mmToTest.path, enhancement)
if enhancedMetaMeme.path.fullTemplatePath == amIextended.path.fullTemplatePath:
testResult = True
Graph.logQ.put( [logType , logLevel.DEBUG , method , "enhancement %s == %s" %(enhancement, amIextended.path.fullTemplatePath)])
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "enhancement %s != %s" %(enhancement, amIextended.path.fullTemplatePath)])
except:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "tested metameme %s extends metameme %s, but is not in the repository." %(enhancement, mmToTest.path.fullTemplatePath)])
if testResult == False:
allTrue = False
if allTrue == False:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "tested metameme %s does not have sought tested enhancement %s" %(mmToTest.path.fullTemplatePath, amIextended.path.fullTemplatePath)])
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(allTrue)
expectedResult = stringArray[3]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testMemeValidity():
method = moduleName + '.' + 'testMemeValidity'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, "Meme_Validity.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
memeValid = False
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
expectedTestResult = False
if stringArray[1] == 'TRUE':
expectedTestResult = True
try:
memeToTest = Graph.templateRepository.resolveTemplateAbsolutely(stringArray[0])
memeValidReport = memeToTest.validate([])
memeValid = memeValidReport[0]
if expectedTestResult != memeValid:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "testkey %s has an unexpected validity status" %(memeToTest.path.fullTemplatePath)])
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(memeValid)
expectedResult = stringArray[1]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testMemeSingleton():
method = moduleName + '.' + 'testMemeSingleton'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, "Meme_Singleton.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
expectedTestResult = False
if stringArray[1] == 'TRUE':
expectedTestResult = True
testResult = False
try:
mmToTest = Graph.templateRepository.templates[stringArray[0]]
if expectedTestResult == mmToTest.isSingleton:
if mmToTest.entityUUID is not None:
testResult = True
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "meme %s has no deployed entity" %(stringArray[0])])
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "meme %s has an unexpected singleton status" %(stringArray[0])])
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[1]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase1(phaseName = 'testEntityPhase1', fName = "Entity_Phase1.atest"):
''' Create the entity from the meme and add it to the entity repo.
Retrieve the entity.
Check to see if it has the properties it is supposed to,
if the type is correct and if the value is correct.
Entity Phase 5 also uses this function
'''
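    # Assumed column layout of an Entity_Phase1.atest line, inferred from the indices used
    # below: [0] meme path, [1] property name, [2] property type, [3] expected value,
    # [4] expected overall result (True/False). Example values here are hypothetical:
    #   e.g.  Examples.SomeMeme propA Integer 42 True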
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID = Graph.api.createEntityFromMeme(stringArray[0])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Entity UUID = %s" %(entityID)])
propTypeCorrect = False
propValueCorrect = False
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
hasProp = Graph.api.getEntityHasProperty(entityID, stringArray[1])
if hasProp == False:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entity from meme %s does not have property %s" %(entityID, stringArray[1])])
else:
propType = Graph.api.getEntityPropertyType(entityID, stringArray[1])
if stringArray[2] == propType:
propTypeCorrect = True
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s in entity from meme %s is wrong type. Expected %s. Got %s" %(stringArray[1], entityID, stringArray[2], propType)])
propValue = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
if propType == 'Boolean':
expValue = False
if stringArray[3].lower() == "true":
expValue = True
if propValue == expValue:
propValueCorrect = True
elif propType == 'Decimal':
expValue = decimal.Decimal(stringArray[3])
if propValue == expValue:
propValueCorrect = True
elif propType == 'Integer':
expValue = int(stringArray[3])
if propValue == expValue:
propValueCorrect = True
else:
if propValue == stringArray[3]:
propValueCorrect = True
if propValueCorrect == False:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s in entity from meme %s is wrong value. Expected %s. Got %s" %(stringArray[1], stringArray[0], stringArray[3], propValue)])
if (propValueCorrect == True) and (propTypeCorrect == True) and (hasProp == True):
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[4]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase1_1(phaseName = 'testEntityPhase1_1', fName = "Entity_Phase1.atest"):
''' a repeat of testEntityPhase1, but using the Python script interface instead of going directly against Graph.api
Tests the following script commands:
createEntityFromMeme
getEntityHasProperty
getEntityPropertyType
getEntityPropertyValue
'''
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
#entityID = Graph.api.createEntityFromMeme(stringArray[0])
entityID = api.createEntityFromMeme(stringArray[0])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Entity UUID = %s" %(entityID)])
propTypeCorrect = False
propValueCorrect = False
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
#hasProp = Graph.api.getEntityHasProperty(entityID, stringArray[1])
hasProp = api.getEntityHasProperty(entityID, stringArray[1])
if hasProp == False:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entity from meme %s does not have property %s" %(entityID, stringArray[1])])
else:
#propType = Graph.api.getEntityPropertyType(entityID, stringArray[1])
propType = api.getEntityPropertyType(entityID, stringArray[1])
if stringArray[2] == propType:
propTypeCorrect = True
else:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s in entity from meme %s is wrong type. Expected %s. Got %s" %(stringArray[1], entityID, stringArray[2], propType)])
#propValue = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
propValue = api.getEntityPropertyValue(entityID, stringArray[1])
if propType == 'Boolean':
expValue = False
if stringArray[3].lower() == "true":
expValue = True
if propValue == expValue:
propValueCorrect = True
elif propType == 'Decimal':
expValue = decimal.Decimal(stringArray[3])
if propValue == expValue:
propValueCorrect = True
elif propType == 'Integer':
expValue = int(stringArray[3])
if propValue == expValue:
propValueCorrect = True
else:
if propValue == stringArray[3]:
propValueCorrect = True
if propValueCorrect == False:
Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s in entity from meme %s is wrong value. Expected %s. Got %s" %(stringArray[1], stringArray[0], stringArray[3], propValue)])
if (propValueCorrect == True) and (propTypeCorrect == True) and (hasProp == True):
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[4]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase2(testPhase = 'testEntityPhase2', fileName = 'Entity_Phase2.atest'):
''' Change the values of the various properties.
Can we change the value to the desired value and are constraints working? '''
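    # Assumed column layout of an Entity_Phase2.atest line, inferred from the indices used
    # below: [0] meme path, [1] property name, [2] new value to set, [3] expected result.
    # A constraint violation raised by setEntityPropertyValue counts as a False outcome.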
method = moduleName + '.' + testPhase
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, fileName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID = Graph.api.createEntityFromMeme(stringArray[0])
Graph.api.setEntityPropertyValue(entityID, stringArray[1], stringArray[2])
getter = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
propType = Graph.api.getEntityPropertyType(entityID, stringArray[1])
#reformat the expected result from unicode string to that which is expected in the property
expectedResult = None
if propType == "String":
expectedResult = stringArray[2]
elif propType == "Integer":
expectedResult = int(stringArray[2])
elif propType == "Decimal":
expectedResult = decimal.Decimal(stringArray[2])
else:
expectedResult = False
if str.lower(stringArray[2]) == 'true':
expectedResult = True
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if getter == expectedResult:
testResult = True
except Exceptions.ScriptError as e:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[3]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase2_1( testPhase = 'testEntityPhase2_1', fileName = 'Entity_Phase2.atest'):
''' a repeat of testEntityPhase2, but using the Python script interface instead of going directly against Graph.api
Tests the following script commands:
setEntityPropertyValue
getEntityPropertyValue
getEntityPropertyType
'''
method = moduleName + '.' + testPhase
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, fileName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID = api.createEntityFromMeme(stringArray[0])
api.setEntityPropertyValue(entityID, stringArray[1], stringArray[2])
getter = api.getEntityPropertyValue(entityID, stringArray[1])
propType = api.getEntityPropertyType(entityID, stringArray[1])
#reformat the expected result from unicode string to that which is expected in the property
expectedResult = None
if propType == "String":
expectedResult = stringArray[2]
elif propType == "Integer":
expectedResult = int(stringArray[2])
elif propType == "Decimal":
expectedResult = decimal.Decimal(stringArray[2])
else:
expectedResult = False
if str.lower(stringArray[2]) == 'true':
expectedResult = True
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if getter == expectedResult:
testResult = True
except Exceptions.ScriptError:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[3]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase3():
''' Add and remove properties.
Remove custom properties.
Tests the following script commands:
addEntityDecimalProperty
addEntityIntegerProperty
addEntityStringProperty
addEntityBooleanProperty
removeAllCustomPropertiesFromEntity
removeEntityProperty
Step 1. add a prop and test its existence and value
Step 2. remove that custom prop and check to make sure it is gone (getHasProperty == False)
Step 3. add the prop again, test its existence and then use removeAllCustomPropertiesFromEntity to remove it'''
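    # Minimal sketch of the three steps exercised below (the meme and property names here
    # are hypothetical, not taken from the test data):
    #   eID = Graph.api.createEntityFromMeme("Examples.SomeMeme")
    #   Graph.api.addEntityStringProperty(eID, "customProp", "abc")      # step 1: add and verify
    #   Graph.api.removeEntityProperty(eID, "customProp")                # step 2: remove and verify gone
    #   Graph.api.addEntityStringProperty(eID, "customProp", "abc")      # step 3: add again ...
    #   Graph.api.removeAllCustomPropertiesFromEntity(eID)               #         ... then bulk-remove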
method = moduleName + '.' + 'testEntityPhase3'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, "Entity_Phase3.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
step1Result = False
step2Result = False
step3Result = False
try:
entityID = Graph.api.createEntityFromMeme(stringArray[0])
#step 1
if stringArray[2] == "String":
Graph.api.addEntityStringProperty(entityID, stringArray[1], stringArray[3])
expectedResult = stringArray[3]
elif stringArray[2] == "Integer":
Graph.api.addEntityIntegerProperty(entityID, stringArray[1], stringArray[3])
expectedResult = int(stringArray[3])
elif stringArray[2] == "Decimal":
Graph.api.addEntityDecimalProperty(entityID, stringArray[1], stringArray[3])
expectedResult = decimal.Decimal(stringArray[3])
else:
Graph.api.addEntityBooleanProperty(entityID, stringArray[1], stringArray[3])
expectedResult = False
if str.lower(stringArray[3]) == 'true':
expectedResult = True
getter = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if getter == expectedResult:
step1Result = True
#step 2
Graph.api.removeEntityProperty(entityID, stringArray[1])
getter = Graph.api.getEntityHasProperty(entityID, stringArray[1])
if getter == False:
step2Result = True
#step 3
if stringArray[2] == "String":
Graph.api.addEntityStringProperty(entityID, stringArray[1], stringArray[3])
elif stringArray[2] == "Integer":
Graph.api.addEntityIntegerProperty(entityID, stringArray[1], stringArray[3])
elif stringArray[2] == "Decimal":
Graph.api.addEntityDecimalProperty(entityID, stringArray[1], stringArray[3])
else:
Graph.api.addEntityBooleanProperty(entityID, stringArray[1], stringArray[3])
Graph.api.removeAllCustomPropertiesFromEntity(entityID)
getter = Graph.api.getEntityHasProperty(entityID, stringArray[1])
if getter == False:
step3Result = True
except Exceptions.ScriptError as e:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
if (step1Result == True) and (step2Result == True) and (step3Result == True):
testResult = True
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase3_1():
''' a repeat of testEntityPhase3, but using the Python script interface instead of going directly against Graph.api
Tests the following script commands:
addEntityDecimalProperty
addEntityIntegerProperty
addEntityStringProperty
addEntityBooleanProperty
removeAllCustomPropertiesFromEntity
removeEntityProperty
'''
method = moduleName + '.' + 'testEntityPhase3_1'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, "Entity_Phase3.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
step1Result = False
step2Result = False
step3Result = False
try:
entityID = api.createEntityFromMeme(stringArray[0])
#step 1
if stringArray[2] == "String":
#Graph.api.addEntityStringProperty(entityID, stringArray[1], stringArray[3])
api.addEntityStringProperty(entityID, stringArray[1], stringArray[3])
expectedResult = stringArray[3]
elif stringArray[2] == "Integer":
#Graph.api.addEntityIntegerProperty(entityID, stringArray[1], stringArray[3])
api.addEntityIntegerProperty(entityID, stringArray[1], stringArray[3])
expectedResult = int(stringArray[3])
elif stringArray[2] == "Decimal":
#Graph.api.addEntityDecimalProperty(entityID, stringArray[1], stringArray[3])
api.addEntityDecimalProperty(entityID, stringArray[1], stringArray[3])
expectedResult = decimal.Decimal(stringArray[3])
else:
Graph.api.addEntityBooleanProperty(entityID, stringArray[1], stringArray[3])
expectedResult = False
if str.lower(stringArray[3]) == 'true':
expectedResult = True
#getter = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
getter = api.getEntityPropertyValue(entityID, stringArray[1])
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if getter == expectedResult:
step1Result = True
#step 2
#Graph.api.removeEntityProperty(entityID, stringArray[1])
#getter = Graph.api.getEntityHasProperty(entityID, stringArray[1])
api.removeEntityProperty(entityID, stringArray[1])
getter = api.getEntityHasProperty(entityID, stringArray[1])
if getter == False:
step2Result = True
#step 3
if stringArray[2] == "String":
#Graph.api.addEntityStringProperty(entityID, stringArray[1], stringArray[3])
api.addEntityStringProperty(entityID, stringArray[1], stringArray[3])
elif stringArray[2] == "Integer":
#Graph.api.addEntityIntegerProperty(entityID, stringArray[1], stringArray[3])
api.addEntityIntegerProperty(entityID, stringArray[1], stringArray[3])
elif stringArray[2] == "Decimal":
#Graph.api.addEntityDecimalProperty(entityID, stringArray[1], stringArray[3])
                api.addEntityDecimalProperty(entityID, stringArray[1], stringArray[3])
else:
#Graph.api.addEntityBooleanProperty(entityID, stringArray[1], stringArray[3])
api.addEntityBooleanProperty(entityID, stringArray[1], stringArray[3])
#Graph.api.removeAllCustomPropertiesFromEntity(entityID)
#getter = Graph.api.getEntityHasProperty(entityID, stringArray[1])
api.removeAllCustomPropertiesFromEntity(entityID)
getter = api.getEntityHasProperty(entityID, stringArray[1])
if getter == False:
step3Result = True
except Exceptions.ScriptError:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
if (step1Result == True) and (step2Result == True) and (step3Result == True):
testResult = True
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase4():
''' Revert the entity to original condition.
Tests the following script commands:
revertEntityPropertyValues
Step 1. change a standard value
Step 2. use revertEntityPropertyValues to return it to stock'''
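    # Minimal sketch of the revert cycle exercised below (meme and property names hypothetical):
    #   eID = Graph.api.createEntityFromMeme("Examples.SomeMeme")
    #   original = Graph.api.getEntityPropertyValue(eID, "someProp")
    #   Graph.api.setEntityPropertyValue(eID, "someProp", "newValue")
    #   Graph.api.revertEntityPropertyValues(eID, False)   # "someProp" is back to its original value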
method = moduleName + '.' + 'testEntityPhase4'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, "Entity_Phase4.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID = Graph.api.createEntityFromMeme(stringArray[0])
baseValue = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
Graph.api.setEntityPropertyValue(entityID, stringArray[1], stringArray[2])
getter = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
propType = Graph.api.getEntityPropertyType(entityID, stringArray[1])
#reformat the expected result from unicode string to that which is expected in the property
expectedResult = None
if propType == "String":
expectedResult = stringArray[2]
elif propType == "Integer":
expectedResult = int(stringArray[2])
elif propType == "Decimal":
expectedResult = decimal.Decimal(stringArray[2])
else:
expectedResult = False
if str.lower(stringArray[2]) == 'true':
expectedResult = True
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if getter == expectedResult:
Graph.api.revertEntityPropertyValues(entityID, False)
getter = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
if getter == baseValue:
testResult = True
except Exceptions.ScriptError:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase4_1():
    ''' a repeat of testEntityPhase4, but using the Python script interface instead of going directly against Graph.api '''
method = moduleName + '.' + 'testEntityPhase4.1'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, "Entity_Phase4.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID = api.createEntityFromMeme(stringArray[0])
baseValue = api.getEntityPropertyValue(entityID, stringArray[1])
api.setEntityPropertyValue(entityID, stringArray[1], stringArray[2])
getter = api.getEntityPropertyValue(entityID, stringArray[1])
propType = api.getEntityPropertyType(entityID, stringArray[1])
#reformat the expected result from unicode string to that which is expected in the property
expectedResult = None
if propType == "String":
expectedResult = stringArray[2]
elif propType == "Integer":
expectedResult = int(stringArray[2])
elif propType == "Decimal":
expectedResult = decimal.Decimal(stringArray[2])
else:
expectedResult = False
if str.lower(stringArray[2]) == 'true':
expectedResult = True
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if getter == expectedResult:
api.revertEntityPropertyValues(entityID, False)
getter = api.getEntityPropertyValue(entityID, stringArray[1])
if getter == baseValue:
testResult = True
except Exceptions.ScriptError:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testRevertEntity():
''' a repeat of the testEntityPhase4 tests, but using revertEntity'''
method = moduleName + '.' + 'testRevertEntity'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, "Entity_Phase4.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
#First, re-run the 4 tests with revertEntity()
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = True
try:
entityID = api.createEntityFromMeme(stringArray[0])
baseValue = api.getEntityPropertyValue(entityID, stringArray[1])
api.setEntityPropertyValue(entityID, stringArray[1], stringArray[2])
getter = api.getEntityPropertyValue(entityID, stringArray[1])
propType = api.getEntityPropertyType(entityID, stringArray[1])
#reformat the expected result from unicode string to that which is expected in the property
expectedResult = None
if propType == "String":
expectedResult = stringArray[2]
elif propType == "Integer":
expectedResult = int(stringArray[2])
elif propType == "Decimal":
expectedResult = decimal.Decimal(stringArray[2])
else:
expectedResult = False
if str.lower(stringArray[2]) == 'true':
expectedResult = True
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if getter == expectedResult:
api.revertEntity(entityID, False)
getter = api.getEntityPropertyValue(entityID, stringArray[1])
if getter != baseValue:
testResult = False
except Exceptions.ScriptError:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
#Second, test with a custom property with revertEntity()
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = True
try:
entityID = api.createEntityFromMeme(stringArray[0])
#Create a property named after the current n count and give it the n value
currValue = "%s" %n
Graph.api.addEntityIntegerProperty(entityID, currValue, currValue)
getter = Graph.api.getEntityHasProperty(entityID, currValue)
if getter != True:
testResult = False
Graph.api.revertEntity(entityID, currValue)
getter = Graph.api.getEntityHasProperty(entityID, currValue)
if getter == True:
testResult = False
except Exceptions.ScriptError:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
#Lastly, rerun test 4 and then add a property and test revertEntity()
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = True
try:
entityID = api.createEntityFromMeme(stringArray[0])
baseValue = api.getEntityPropertyValue(entityID, stringArray[1])
api.setEntityPropertyValue(entityID, stringArray[1], stringArray[2])
getter = api.getEntityPropertyValue(entityID, stringArray[1])
propType = api.getEntityPropertyType(entityID, stringArray[1])
#reformat the expected result from unicode string to that which is expected in the property
expectedResult = None
if propType == "String":
expectedResult = stringArray[2]
elif propType == "Integer":
expectedResult = int(stringArray[2])
elif propType == "Decimal":
expectedResult = decimal.Decimal(stringArray[2])
else:
expectedResult = False
if str.lower(stringArray[2]) == 'true':
expectedResult = True
#Create a property named after the current n count and give it the n value
currValue = "%s" %n
Graph.api.addEntityIntegerProperty(entityID, currValue, currValue)
getter = Graph.api.getEntityHasProperty(entityID, currValue)
if getter != True:
testResult = False
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if getter == expectedResult:
api.revertEntity(entityID, False)
getter = api.getEntityPropertyValue(entityID, stringArray[1])
if getter != baseValue:
testResult = False
#Make sure the custom property is gone
Graph.api.revertEntity(entityID, currValue)
getter = Graph.api.getEntityHasProperty(entityID, currValue)
if getter == True:
testResult = False
except Exceptions.ScriptError:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase6():
''' Check and see if the meme is a singleton
    Tests getIsMemeSingleton
    Tests createEntityFromMeme in singleton context
Strategy -
If the meme is a singleton, then it should have had an entity created already
1 - Is the meme a singleton?
2a - If not, then entity.uuid should be non-existent
2b - If so, then entity.uuid should have a UUID
    3b - create an entity
4b - is the UUID the same as before? It should be
'''
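    # Minimal sketch of the singleton check performed below (meme name hypothetical):
    #   if Graph.api.getIsMemeSingleton("Examples.SingletonMeme"):
    #       # the singleton entity already exists; creating it again must yield the same UUID
    #       eID = Graph.api.createEntityFromMeme("Examples.SingletonMeme")
    #       assert Graph.api.getIsEntitySingleton(eID)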
method = moduleName + '.' + 'testEntityPhase6'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, "Entity_Phase6.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
expectedTestResult = False
if stringArray[1] == 'TRUE':
expectedTestResult = True
testResult = False
mSingletonFlagCorrect = False
mEntityUUIDCorrect = False
eSingletonFlagCorrect = False
eSameUUIDasInMeme = False
try:
isSingleton = Graph.api.getIsMemeSingleton(stringArray[0])
if expectedTestResult == isSingleton:
mSingletonFlagCorrect = True
meme = Graph.templateRepository.resolveTemplateAbsolutely(stringArray[0])
oldEntityID = None
#Is the meme a singleton?
if isSingleton == False:
#2a - If not, then entity.uuid should be non-existent
try:
if meme.entityUUID is None:
mEntityUUIDCorrect = True
except:
mEntityUUIDCorrect = True
else:
#2b - If so, then entity.uuid should have a UUID
if meme.entityUUID is not None:
mEntityUUIDCorrect = True
oldEntityID = meme.entityUUID
entityID = Graph.api.createEntityFromMeme(stringArray[0])
entityIsSingleton = Graph.api.getIsEntitySingleton(entityID)
if isSingleton == False:
if entityIsSingleton == False:
eSingletonFlagCorrect = True
eSameUUIDasInMeme = True
else:
if (entityIsSingleton == True) and (entityID == oldEntityID):
eSingletonFlagCorrect = True
eSameUUIDasInMeme = True
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if (mSingletonFlagCorrect == True) and (mEntityUUIDCorrect == True) and (eSingletonFlagCorrect == True) and (eSameUUIDasInMeme == True):
testResult = True
except Exceptions.ScriptError:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase6_1():
''' Repeat 6 using python script interface.
Tests the following script functions:
getIsEntitySingleton
getIsMemeSingleton
'''
method = moduleName + '.' + 'testEntityPhase6.1'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, "Entity_Phase6.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
expectedTestResult = False
if stringArray[1] == 'TRUE':
expectedTestResult = True
testResult = False
mSingletonFlagCorrect = False
mEntityUUIDCorrect = False
eSingletonFlagCorrect = False
eSameUUIDasInMeme = False
try:
isSingleton = api.getIsMemeSingleton(stringArray[0])
if expectedTestResult == isSingleton:
mSingletonFlagCorrect = True
meme = Graph.templateRepository.resolveTemplateAbsolutely(stringArray[0])
oldEntityID = None
#Is the meme a singleton?
if isSingleton == False:
#2a - If not, then entity.uuid should be non-existent
try:
if meme.entityUUID is None:
mEntityUUIDCorrect = True
except:
mEntityUUIDCorrect = True
else:
#2b - If so, then entity.uuid should have a UUID
if meme.entityUUID is not None:
mEntityUUIDCorrect = True
oldEntityID = meme.entityUUID
entityID = api.createEntityFromMeme(stringArray[0])
entityIsSingleton = api.getIsEntitySingleton(entityID)
if isSingleton == False:
if entityIsSingleton == False:
eSingletonFlagCorrect = True
eSameUUIDasInMeme = True
else:
if (entityIsSingleton == True) and (entityID == oldEntityID):
eSingletonFlagCorrect = True
eSameUUIDasInMeme = True
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if (mSingletonFlagCorrect == True) and (mEntityUUIDCorrect == True) and (eSingletonFlagCorrect == True) and (eSameUUIDasInMeme == True):
testResult = True
except Exceptions.ScriptError:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase7(phaseName = 'testEntityPhase7', fName = "Entity_Phase7.atest"):
    ''' Create entities from the memes in the first two columns.
    Add a link between the two at the location on entity 0 given in column 3.
    Check and see if each is a counterpart as seen from the other, using the addresses in columns 4&5 (CheckPath & Backpath)
    & the filter.
    The filter must be the same as the type of link (or None).
    The check location must be the same as the added location.
'''
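    # Assumed column layout of an Entity_Phase7.atest line, inferred from the indices used
    # below: [0] meme for entity 0, [1] meme for entity 1, [2] mount-point path on entity 0
    # ("X" = link directly to entity 0), [3] check path from entity 0, [4] back path from
    # entity 1, [5] link type passed to addEntityLink, [6] link-type filter ("X" = None),
    # [7] expected result.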
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
lresultSet = []
del lresultSet[:]
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID0 = Graph.api.createEntityFromMeme(stringArray[0])
entityID1 = Graph.api.createEntityFromMeme(stringArray[1])
#Attach entityID1 at the mount point specified in stringArray[2]
if stringArray[2] != "X":
mountPoints = api.getLinkCounterpartsByType(entityID0, stringArray[2], 0)
unusedMountPointsOverview = {}
for mountPoint in mountPoints:
try:
mpMemeType = api.getEntityMemeType(mountPoint)
unusedMountPointsOverview[mountPoint] = mpMemeType
except Exception as e:
#errorMessage = "debugHelperMemeType warning in Smoketest.testEntityPhase7. Traceback = %s" %e
#Graph.logQ.put( [logType , logLevel.WARNING , method , errorMessage])
raise e
for mountPoint in mountPoints:
api.addEntityLink(mountPoint, entityID1, {}, int(stringArray[5]))
else:
api.addEntityLink(entityID0, entityID1, {}, int(stringArray[5]))
backTrackCorrect = False
linkType = None
if stringArray[6] != "X":
linkType = int(stringArray[6])
#see if we can get from entityID0 to entityID1 via stringArray[3]
addLocationCorrect = False
addLocationList = api.getLinkCounterpartsByType(entityID0, stringArray[3], linkType)
if len(addLocationList) > 0:
addLocationCorrect = True
#see if we can get from entityID1 to entityID0 via stringArray[4]
backTrackCorrect = False
backTrackLocationList = api.getLinkCounterpartsByType(entityID1, stringArray[4], linkType)
if len(backTrackLocationList) > 0:
backTrackCorrect = True
if (backTrackCorrect == True) and (addLocationCorrect == True):
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[7]
results = [n, testcase, allTrueResult, expectedResult, errata]
lresultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return lresultSet
def testLinkCounterpartsByMetaMemeType(phaseName = 'LinkCounterpartsByMetaMemeType', fName = "LinkCounterpartsByMetaMemeType.atest"):
''' Repeat Phase 7, but traversing with metameme paths, instead of meme paths.
LinkCounterpartsByMetaMemeType.atest differs from TestEntityPhase7.atest only in that cols D and E use metameme paths.
    Create entities from the memes in the first two columns.
    Add a link between the two at the location on entity 0 given in column 3.
    Check and see if each is a counterpart as seen from the other, using the addresses in columns 4&5 (CheckPath & Backpath)
    & the filter.
    The filter must be the same as the type of link (or None).
    The check location must be the same as the added location.
'''
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
lresultSet = []
del lresultSet[:]
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID0 = Graph.api.createEntityFromMeme(stringArray[0])
entityID1 = Graph.api.createEntityFromMeme(stringArray[1])
#Attach entityID1 at the mount point specified in stringArray[2]
if stringArray[2] != "X":
mountPoints = api.getLinkCounterpartsByType(entityID0, stringArray[2], 0)
unusedMountPointsOverview = {}
for mountPoint in mountPoints:
try:
mpMemeType = api.getEntityMemeType(mountPoint)
unusedMountPointsOverview[mountPoint] = mpMemeType
except Exception as e:
#errorMessage = "debugHelperMemeType warning in Smoketest.testEntityPhase7. Traceback = %s" %e
#Graph.logQ.put( [logType , logLevel.WARNING , method , errorMessage])
raise e
for mountPoint in mountPoints:
api.addEntityLink(mountPoint, entityID1, {}, int(stringArray[5]))
else:
api.addEntityLink(entityID0, entityID1, {}, int(stringArray[5]))
backTrackCorrect = False
linkType = None
if stringArray[6] != "X":
linkType = int(stringArray[6])
#see if we can get from entityID0 to entityID1 via stringArray[3]
addLocationCorrect = False
addLocationList = api.getLinkCounterpartsByMetaMemeType(entityID0, stringArray[3], linkType)
if len(addLocationList) > 0:
addLocationCorrect = True
#see if we can get from entityID1 to entityID0 via stringArray[4]
backTrackCorrect = False
backTrackLocationList = api.getLinkCounterpartsByMetaMemeType(entityID1, stringArray[4], linkType)
if len(backTrackLocationList) > 0:
backTrackCorrect = True
if (backTrackCorrect == True) and (addLocationCorrect == True):
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[7]
results = [n, testcase, allTrueResult, expectedResult, errata]
lresultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return lresultSet
def testEntityPhase9(phaseName = 'testEntityPhase9', fName = "Entity_Phase9.atest"):
''' A modified phase 7 test with entity link removal after testing.
    Add a link between the two at the location on entity 0 given in column 3.
    Check and see if each is a counterpart as seen from the other, using the addresses in columns 4&5 (CheckPath & Backpath)
    & the filter.
    The filter must be the same as the type of link (or None).
    The check location must be the same as the added location.
    (So far, so good. This is the same as in phase 7.)
added:
Now remove the link
Check again to make sure that the link no longer exists
'''
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
part1TestResult = False
testResult = False
try:
entityID0 = Graph.api.createEntityFromMeme(stringArray[0])
entityID1 = Graph.api.createEntityFromMeme(stringArray[1])
#Attach entityID1 at the mount point specified in stringArray[2]
rememberMe = {}
mountPoints = api.getLinkCounterpartsByType(entityID0, stringArray[2], 0)
for mountPoint in mountPoints:
api.addEntityLink(mountPoint, entityID1, {}, int(stringArray[5]))
rememberMe[mountPoint] = entityID1
backTrackCorrect = False
linkType = None
if stringArray[6] != "X":
linkType = int(stringArray[6])
addLocationCorrect = False
addLocationList = api.getLinkCounterpartsByType(entityID0, stringArray[3], linkType)
if len(addLocationList) > 0:
addLocationCorrect = True
#see if we can get from entityID1 to entityID0 via stringArray[4]
backTrackCorrect = False
backTrackLocationList = api.getLinkCounterpartsByType(entityID1, stringArray[4], linkType)
if len(backTrackLocationList) > 0:
backTrackCorrect = True
if (backTrackCorrect == True) and (addLocationCorrect == True):
part1TestResult = True
#Time for phase 2
#Now remove that added member. This is why we kept track of that added member; to speed up removal
for mountPoint in rememberMe.keys():
api.removeEntityLink(mountPoint, entityID1)
secondAddLocationCorrect = False
addLocationList = api.getLinkCounterpartsByType(entityID0, stringArray[3], linkType)
if len(addLocationList) == 0:
secondAddLocationCorrect = True
if (part1TestResult == True) and (secondAddLocationCorrect == True):
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[7]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase10(phaseName = 'testEntityPhase10', fName = "Entity_Phase10.atest"):
""" Create two entities from the meme in the first two colums.
Both will should have the same singleton in their association (link) networks
Try to traverse from one to the other
This tests the 'singleton bridge' with respect to souble and triple wildcards
"""
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID0 = Graph.api.createEntityFromMeme(stringArray[0])
trackLocationList = api.getLinkCounterpartsByType(entityID0, stringArray[2], None)
if len(trackLocationList) > 0:
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[3]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testTraverseParams(phaseName = 'testTraverseParams', fName = "TraverseWithParams.atest"):
""" Create a TraverseParameters.A and TraverseParameters.B. Attach them and assign values to the edges (links).
    Then for each test case:
    1 - Try to select A (with or without params, depending on the test case)
    2 - Try to navigate to B (with or without node/traverse params, depending on the test case)
    3 - Compare whether B was successfully reached with the expected outcome.
"""
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = eachReadLine.split(' | ')
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
if n == 40:
unusedCatch = True
testResult = False
try:
entityID0 = Graph.api.createEntityFromMeme("TraverseParameters.A")
entityID1 = Graph.api.createEntityFromMeme("TraverseParameters.B")
Graph.api.addEntityLink(entityID0, entityID1, {'a':4}, 0)
if n == 70:
unusedCatchMe = True
traversePath = stringArray[0].strip()
trackLocationList = api.getLinkCounterpartsByType(entityID0, traversePath, None)
if len(trackLocationList) > 0:
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[1].strip()
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testNumericValue(filename):
#NumericValue.atest
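    # Behaviour inferred from the body below: for each whitespace-delimited line,
    # evaluate every entity whose meme type is in column 0 and pass the testcase if the
    # decimal value in column 1 appears among the values returned by evaluateEntity();
    # column 2 holds the expected result.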
method = moduleName + '.' + 'testNumericValue'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
testArgumentMap = {}
testResult = False
try:
entityIDList = api.getEntitiesByMemeType(stringArray[0])
for entityIDListEntry in entityIDList:
entityID = entityIDListEntry
numberListS = api.evaluateEntity(entityID, testArgumentMap)
numberList = []
for numberString in numberListS:
dec = decimal.Decimal(numberString)
numberList.append(dec)
argAsDecimal = decimal.Decimal(stringArray[1])
if argAsDecimal in numberList:
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[2]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testImplicitMeme(phaseName = 'testImplicitMeme', fName = "ImplicitMeme.atest"):
    ''' Create entities from the memes in the first two columns.
        Add a link between the two at the location on entity 0 given in column 3, if it is not direct; otherwise link directly to entity 0.
        Check and see if each is a counterpart as seen from the other, using the addresses in columns 4 & 5 (CheckPath & Backpath)
            and the filter.
            The filter must be the same as the type of link (or None).
        The check location must be the same as the added location.
    '''
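    # Columns actually read below (whitespace-delimited):
    #   0, 1: memes for entity 0 and entity 1
    #   2:    mount point path on entity 0, or '**DIRECT**' for a direct link
    #   3, 4: check path and back path
    #   5:    expected result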
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
#debug
#print ("Starting testcase %s, meme %s" %(n, stringArray[0]))
#if n == 30:
# pass
#/debug
testResult = False
try:
try:
entityID0 = Graph.api.createEntityFromMeme(stringArray[0])
except Exception as e:
raise DBError(stringArray[0])
try:
entityID1 = Graph.api.createEntityFromMeme(stringArray[1])
except Exception as e:
raise DBError(stringArray[1])
#Attach entityID1 at the mount point specified in stringArray[2]
if (stringArray[2] != '**DIRECT**'):
mountPoints = api.getLinkCounterpartsByType(entityID0, stringArray[2], 0)
for mountPoint in mountPoints:
api.addEntityLink(mountPoint, entityID1)
else:
#If we have a **DIRECT** mount, then attach entity 1 to entity 0
api.addEntityLink(entityID0, entityID1)
backTrackCorrect = False
linkType = None
#see if we can get from entityID0 to entityID1 via stringArray[3]
addLocationCorrect = False
addLocationList = api.getLinkCounterpartsByType(entityID0, stringArray[3], linkType)
if len(addLocationList) > 0:
addLocationCorrect = True
#see if we can get from entityID1 to entityID0 via stringArray[4]
backTrackCorrect = False
backTrackLocationList = api.getLinkCounterpartsByType(entityID1, stringArray[4], linkType)
if len(backTrackLocationList) > 0:
backTrackCorrect = True
if (backTrackCorrect == True) and (addLocationCorrect == True):
testResult = True
except DBError as e:
errorMsg = ('Database Error! Check to see if the Database has been started and that meme %s is in the appropriate table.' % (e) )
errata.append(errorMsg)
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[2])
allTrueResult = str(testResult)
expectedResult = stringArray[5]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testCondition(filename):
method = moduleName + '.' + 'testCondition'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
entityIDList = api.getEntitiesByMemeType(stringArray[0])
for entityIDListEntry in entityIDList:
testArgumentMap = {stringArray[2] : stringArray[1]}
try:
testArgumentMap[stringArray[4]] = stringArray[3]
except:
pass
try:
testArgumentMap[stringArray[6]] = stringArray[5]
except:
pass
try:
del testArgumentMap['XXX']
except:
pass
testResult = False
try:
entityIDList = api.getEntitiesByMemeType(stringArray[0])
for entityIDListEntry in entityIDList:
entityID = entityIDListEntry
testResult = api.evaluateEntity(entityID, testArgumentMap)
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[7]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testAACondition(filename):
method = moduleName + '.' + 'testAACondition'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
testArgumentMap = {}
subjectID = api.createEntityFromMeme(stringArray[1])
objectID = None
try:
objectID = Graph.api.createEntityFromMeme(stringArray[2])
except:
pass
if objectID is None:
objectID = subjectID
try:
del testArgumentMap['XXX']
except:
pass
testResult = False
try:
entityIDList = api.getEntitiesByMemeType(stringArray[0])
for entityIDListEntry in entityIDList:
cEntityID = entityIDListEntry
testResult = api.evaluateEntity(cEntityID, testArgumentMap, None, subjectID, objectID)
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[3]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testSourceCreateMeme(filename):
method = moduleName + '.' + 'testSourceCreateMeme'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
#Phase 1 - explicit Metameme and Meme declaration
for eachReadLine in allLines:
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
metamemePath = stringArray[0]
modulePath = stringArray[1]
memeName = stringArray[2]
operationResult = {}
testResult = False
try:
operationResult = api.sourceMemeCreate(memeName, modulePath, metamemePath)
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"memeID" : "%s.%s" %(modulePath, memeName), "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = str(operationResult["memeID"])
validation = operationResult["ValidationResults"]
if validation[0] == True:
testResult = True
else:
testResult = False
errata = validation[1]
allTrueResult = str(testResult)
expectedResult = stringArray[3]
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
#Phase 2 - Default Metameme, default module
testResult = False
memeName = "DefaultMetamemeMeme"
try:
operationResult = api.sourceMemeCreate(memeName)
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"memeID" : "%s.%s" %("Graphyne", memeName), "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = str(operationResult["memeID"])
validation = operationResult["ValidationResults"]
if validation[0] == True:
testResult = True
else:
testResult = False
errata = validation[1]
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
#Phase 3 - Default Metameme, custom module
testResult = False
try:
operationResult = api.sourceMemeCreate(memeName, "CustomModule")
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"memeID" : "%s.%s" %("Graphyne", memeName), "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = str(operationResult["memeID"])
validation = operationResult["ValidationResults"]
if validation[0] == True:
testResult = True
else:
testResult = False
errata = validation[1]
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testSourceProperty(filename):
method = moduleName + '.' + 'testSourceProperty'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
metamemePath = stringArray[0]
modulePath = stringArray[1]
memeName = stringArray[2]
propName = stringArray[3]
propValueStr = stringArray[4]
operationResult = {}
testResult = "False"
try:
sourceMeme = api.sourceMemeCreate(memeName, modulePath, metamemePath)
operationResult = api.sourceMemePropertySet(sourceMeme["memeID"], propName, propValueStr)
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"memeID" : "%s.%s" %(modulePath, memeName), "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = "%s with property %s, %s" %(testResult[0], propName, propValueStr)
validation = operationResult["ValidationResults"]
if validation[0] == True:
testResult = str(True)
else:
testResult = str(False)
errata = validation[1]
expectedResult = stringArray[5]
results = [n, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testSourcePropertyRemove(filename):
method = moduleName + '.' + 'testSourcePropertyRemove'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
metamemePath = stringArray[0]
modulePath = "%s_remove" %stringArray[1]
memeName = stringArray[2]
propName = stringArray[3]
propValueStr = stringArray[4]
sourceMeme = []
testResult = str(False)
try:
sourceMeme = api.sourceMemeCreate(memeName, modulePath, metamemePath)
unusedAddProp = api.sourceMemePropertySet(sourceMeme["memeID"], propName, propValueStr)
operationResult = api.sourceMemePropertyRemove(sourceMeme["memeID"], propName)
#list: [u'SourceProperty1_remove.L', [True, []]]
validation = operationResult["ValidationResults"]
if validation[0] == True:
testResult = str(True)
else:
testResult = str(False)
errata = validation[1]
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"memeID" : "%s.%s" %(modulePath, memeName), "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = "%s with property %s, %s removed" %(sourceMeme["memeID"], propName, propValueStr)
expectedResult = stringArray[5]
results = [n, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testSourceMember(filename):
method = moduleName + '.' + 'testSourceMember'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
#e.g. (Examples.M, SourceMember3, M, Examples.L, SourceMember3, L, 2, False)
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
metamemePath = stringArray[0]
modulePath = stringArray[1]
memeName = stringArray[2]
memberMetamemePath = stringArray[3]
memberModulePath = stringArray[4]
memberMemeName = stringArray[5]
occurrence = stringArray[6]
sourceMeme = ['']
sourceMemberMeme = ['']
testResult = str(False)
try:
sourceMeme = api.sourceMemeCreate(memeName, modulePath, metamemePath)
sourceMemberMeme = api.sourceMemeCreate(memberMemeName, memberModulePath, memberMetamemePath)
operationResult = api.sourceMemeMemberAdd(sourceMeme["memeID"], sourceMemberMeme["memeID"], occurrence)
validation = operationResult["ValidationResults"]
if validation[0] == True:
testResult = str(True)
else:
testResult = str(False)
errata = validation[1]
except Exception as e:
errorMsg = ('Error in testcase testSourceMember! Traceback = %s' % (e) )
api.writeError(errorMsg)
errata.append(errorMsg)
testcase = "%s has member %s" %(sourceMeme["memeID"], sourceMemberMeme["memeID"])
expectedResult = stringArray[7]
results = [n, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testSourceMemberRemove(filename):
method = moduleName + '.' + 'testSourceMemberRemove'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
metamemePath = stringArray[0]
modulePath = "%s_remove" %stringArray[1]
memeName = stringArray[2]
memberMetamemePath = stringArray[3]
memberModulePath = "%s_remove" %stringArray[4]
memberMemeName = stringArray[5]
occurrence = stringArray[6]
sourceMeme = ['']
sourceMemberMeme = ['']
testResult = str(False)
try:
sourceMeme = api.sourceMemeCreate(memeName, modulePath, metamemePath)
sourceMemberMeme = api.sourceMemeCreate(memberMemeName, memberModulePath, memberMetamemePath)
unusedAdd = api.sourceMemeMemberAdd(sourceMeme["memeID"], sourceMemberMeme["memeID"], occurrence)
operationResult = api.sourceMemeMemberRemove(sourceMeme["memeID"], sourceMemberMeme["memeID"])
validation = operationResult["ValidationResults"]
if validation[0] == True:
testResult = str(True)
else:
testResult = str(False)
errata = validation[1]
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"memeID" : "%s.%s" %(modulePath, memeName), "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = "%s has member %s" %(sourceMeme["memeID"], sourceMemberMeme["memeID"])
expectedResult = "True"
results = [n, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testSourceEnhancement(filename):
method = moduleName + '.' + 'testSourceEnhancement'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
metamemePath = stringArray[0]
modulePath = stringArray[1]
memeName = stringArray[2]
enhancedMetamemePath = stringArray[3]
enhancedModulePath = stringArray[4]
enhancedMemeName = stringArray[5]
sourceMeme = ['']
sourceMemberMeme = ['']
testResult = str(False)
try:
sourceMeme = api.sourceMemeCreate(memeName, modulePath, metamemePath)
sourceMemberMeme = api.sourceMemeCreate(enhancedMemeName, enhancedModulePath, enhancedMetamemePath)
operationResult = api.sourceMemeEnhancementAdd(sourceMeme["memeID"], sourceMemberMeme["memeID"])
validation = operationResult["ValidationResults"]
if validation[0] == True:
testResult = str(True)
else:
testResult = str(False)
errata = validation[1]
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"memeID" : "%s.%s" %(modulePath, memeName), "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = "%s enhancing %s" %(sourceMeme["memeID"], sourceMemberMeme["memeID"])
expectedResult = stringArray[6]
results = [n, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
#Part 2 - Create two generic memes and use one to enhance the other
# Create the two memes
# Add a property to each
# Create entities from the two memes
    # Check to ensure that they have the proper properties
    # Use one meme to enhance the other.
# Create a new entity.
# Test that it has all properties
part2AllTrue = True
# Create the two memes
enhancingMeme = api.sourceMemeCreate("Enhancing")
enhancedMeme = api.sourceMemeCreate("Enhanced")
testcase = "Generic enhancing Generic"
try:
# Add a property to each
api.sourceMemePropertySet(enhancingMeme["memeID"], "A", "A")
api.sourceMemePropertySet(enhancedMeme["memeID"], "B", "B")
# Create entities from the two memes
entityA = api.createEntityFromMeme(enhancingMeme["memeID"])
entityB = api.createEntityFromMeme(enhancedMeme["memeID"])
        # Check to ensure that they have the proper properties
entityAhasA = Graph.api.getEntityHasProperty(entityA, "A")
entityBhasA = Graph.api.getEntityHasProperty(entityB, "A")
entityAhasB = Graph.api.getEntityHasProperty(entityA, "B")
entityBhasB = Graph.api.getEntityHasProperty(entityB, "B")
if entityAhasA == False:
part2AllTrue = False
if entityBhasA == True:
part2AllTrue = False
if entityAhasB == True:
part2AllTrue = False
if entityBhasB == False:
part2AllTrue = False
        # Use one meme to enhance the other.
unusedReturn = api.sourceMemeEnhancementAdd(enhancingMeme["memeID"], enhancedMeme["memeID"])
# Test that it has all properties
entityAB = api.createEntityFromMeme(enhancedMeme["memeID"])
entityABhasA = Graph.api.getEntityHasProperty(entityAB, "A")
entityABhasB = Graph.api.getEntityHasProperty(entityAB, "B")
if entityABhasA == False:
part2AllTrue = False
if entityABhasB == False:
part2AllTrue = False
part2AllTrue = str(part2AllTrue)
results = [n, testcase, part2AllTrue, "True", []]
resultSet.append(results)
except Exception as e:
results = [n, testcase, "False", "True", []]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testSourceEnhancementRemove(filename):
method = moduleName + '.' + 'testSourceEnhancementRemove'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
metamemePath = stringArray[0]
modulePath = "%s_remove" %stringArray[1]
memeName = stringArray[2]
enhancedMetamemePath = stringArray[3]
enhancedModulePath = "%s_remove" %stringArray[4]
enhancedMemeName = stringArray[5]
sourceMeme = ['']
sourceMemberMeme = ['']
testResult = str(False)
try:
sourceMeme = api.sourceMemeCreate(memeName, modulePath, metamemePath)
sourceMemberMeme = api.sourceMemeCreate(enhancedMemeName, enhancedModulePath, enhancedMetamemePath)
unusedAddEnhancement = api.sourceMemeEnhancementAdd(sourceMeme["memeID"], sourceMemberMeme["memeID"])
operationResult = api.sourceMemeEnhancementRemove(sourceMeme["memeID"], sourceMemberMeme["memeID"])
validation = operationResult["ValidationResults"]
if validation[0] == True:
testResult = str(True)
else:
testResult = str(False)
errata = validation[1]
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"memeID" : "%s.%s" %(modulePath, memeName), "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = "%s enhancing %s" %(sourceMeme["memeID"], sourceMemberMeme["memeID"])
expectedResult = "True"
results = [n, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
#Part 2 - Create two generic memes and use one to enhance the other
# Create the two memes
# Add a property to each
# Create entities from the two memes
    # Check to ensure that they have the proper properties
    # Use one meme to enhance the other.
# Create a new entity.
# Test that it has all properties
# Remove the enhancement
# Create a new entity and test that the enhancing property is not there
part2AllTrue = True
# Create the two memes
enhancingMeme = api.sourceMemeCreate("Enhancing")
enhancedMeme = api.sourceMemeCreate("Enhanced")
testcase = "Generic enhancing Generic"
try:
# Add a property to each
api.sourceMemePropertySet(enhancingMeme["memeID"], "A", "A")
api.sourceMemePropertySet(enhancedMeme["memeID"], "B", "B")
# Create entities from the two memes
entityA = api.createEntityFromMeme(enhancingMeme["memeID"])
entityB = api.createEntityFromMeme(enhancedMeme["memeID"])
        # Check to ensure that they have the proper properties
entityAhasA = Graph.api.getEntityHasProperty(entityA, "A")
entityBhasA = Graph.api.getEntityHasProperty(entityB, "A")
entityAhasB = Graph.api.getEntityHasProperty(entityA, "B")
entityBhasB = Graph.api.getEntityHasProperty(entityB, "B")
if entityAhasA == False:
part2AllTrue = False
if entityBhasA == True:
part2AllTrue = False
if entityAhasB == True:
part2AllTrue = False
if entityBhasB == False:
part2AllTrue = False
        # Use one meme to enhance the other.
unusedReturn = api.sourceMemeEnhancementAdd(enhancingMeme["memeID"], enhancedMeme["memeID"])
# Test that it has all properties
entityAB = api.createEntityFromMeme(enhancedMeme["memeID"])
entityABhasA = Graph.api.getEntityHasProperty(entityAB, "A")
entityABhasB = Graph.api.getEntityHasProperty(entityAB, "B")
if entityABhasA == False:
part2AllTrue = False
if entityABhasB == False:
part2AllTrue = False
# Remove the enhancement
unusedReturn = api.sourceMemeEnhancementRemove(enhancingMeme["memeID"], enhancedMeme["memeID"])
# Create a new entity and test that the enhancing property is not there
entityABRemoved = api.createEntityFromMeme(enhancedMeme["memeID"])
entityABRemovedHasA = Graph.api.getEntityHasProperty(entityABRemoved, "A")
entityABRemovedHasB = Graph.api.getEntityHasProperty(entityABRemoved, "B")
if entityABRemovedHasA == True:
part2AllTrue = False
if entityABRemovedHasB == False:
part2AllTrue = False
part2AllTrue = str(part2AllTrue)
results = [n, testcase, part2AllTrue, "True", []]
resultSet.append(results)
except Exception as e:
results = [n, testcase, "False", "True", []]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testSourceSingletonSet(filename):
    method = moduleName + '.' + 'testSourceSingletonSet'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
#try:
testFileName = os.path.join(testDirPath, filename)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
unicodeReadLine = str(eachReadLine)
stringArray = str.split(unicodeReadLine)
metamemePath = stringArray[0]
modulePath = "%s_singleton" %stringArray[1]
memeName = stringArray[2]
sourceMeme = ['']
testResult = str(False)
afterSingleton = False
afterRemoval = False
operationResult = {}
try:
sourceMeme = api.sourceMemeCreate(memeName, modulePath, metamemePath)
setAsSingleton = api.sourceMemeSetSingleton(sourceMeme["memeID"], True)
afterSingleton = api.getIsMemeSingleton(sourceMeme["memeID"])
if afterSingleton == False:
verboseResults = setAsSingleton["ValidationResults"]
errata.append(verboseResults[1])
setAsNonSingleton = api.sourceMemeSetSingleton(sourceMeme["memeID"], False)
afterRemoval = api.getIsMemeSingleton(sourceMeme["memeID"])
if afterRemoval == True:
verboseResults = setAsNonSingleton["ValidationResults"]
errata.append(verboseResults[1])
operationResult = {"memeID" : sourceMeme["memeID"], "ValidationResults" : [True, []]}
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"memeID" : "%s.%s" %(modulePath, memeName), "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
if (afterSingleton == True) and (afterRemoval == False):
testResult = str(True)
testcase = str(operationResult["memeID"])
expectedResult = "True"
results = [n, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testGeneric():
"""
    Create a generic meme; one of type Graphyne.Generic.
"""
method = moduleName + '.' + 'testGeneric'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = False
expectedResult = "True"
try:
testEntityID = api.createEntity()
memeType = api.getEntityMemeType(testEntityID)
if memeType == "Graphyne.Generic":
operationResult = {"memeID" : "Graphyne.Generic", "ValidationResults" : [True, []]}
testResult = "True"
else:
errorMsg = ('Generic Entity Has meme type = %s' % (memeType) )
operationResult = {"memeID" : "Graphyne.Generic", "ValidationResults" : [True, []]}
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"memeID" : "Graphyne.Generic", "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = str(operationResult["memeID"])
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testDeleteEntity():
"""
Test Entity Removal.
Create 5 entities of type Graphyne.Generic.
Chain them together: E1 >> E2 >> E3 >> E4 >> E5
Check that they are functional
Traverse from E1 to E5
Traverse from E5 to E1
Delete E3
    We should not be able to traverse from E1 to E5
    We should not be able to traverse from E5 to E1
We should not be able to traverse from E2 to E3
We should not be able to traverse from E3 to E2
We should not be able to traverse from E4 to E3
We should not be able to traverse from E3 to E4
We should be able to traverse from E1 to E2
We should be able to traverse from E2 to E1
We should be able to traverse from E4 to E5
We should be able to traverse from E5 to E4
    We should not be able to acquire E3 via getEntity()
"""
method = moduleName + '.' + 'testDeleteEntity'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
#Create 5 entities of type Graphyne.Generic. Chain them together: E1 >> E2 >> E3 >> E4 >> E5
try:
testEntityID1 = api.createEntity()
testEntityID2 = api.createEntity()
testEntityID3 = api.createEntity()
testEntityID4 = api.createEntity()
testEntityID5 = api.createEntity()
api.addEntityLink(testEntityID1, testEntityID2)
api.addEntityLink(testEntityID2, testEntityID3)
api.addEntityLink(testEntityID3, testEntityID4)
api.addEntityLink(testEntityID4, testEntityID5)
except Exception as e:
testResult = "False"
errorMsg = ('Error creating entities! Traceback = %s' % (e) )
errata.append(errorMsg)
    #Navigate to end of chain and back
try:
uuid15 = api.getLinkCounterpartsByType(testEntityID1, "Graphyne.Generic::Graphyne.Generic::Graphyne.Generic::Graphyne.Generic")
uuid11 = api.getLinkCounterpartsByType(uuid15[0], "Graphyne.Generic::Graphyne.Generic::Graphyne.Generic::Graphyne.Generic")
if (uuid15[0] != testEntityID5) or (uuid11[0] != testEntityID1):
testResult = "False"
            errorMsg = ('%sShould be able to navigate full chain and back before deleting middle entity, but could not!\n' %errorMsg)
except Exception as e:
testResult = "False"
errorMsg = ('Error deleting Entity! Traceback = %s' % (e) )
errata.append(errorMsg)
#Delete E3
try:
api.destroyEntity(testEntityID3)
except Exception as e:
testResult = "False"
errorMsg = ('Error deleting Entity! Traceback = %s' % (e) )
errata.append(errorMsg)
#E3 should no longer be there
try:
e3 = api.getEntity(testEntityID3)
if e3 is not None:
testResult = "False"
errorMsg = ('Deleted entity still present!')
errata.append(errorMsg)
except Exceptions.NoSuchEntityError as e:
#We expect a NoSuchEntityError here
pass
except Exception as e:
#But we ONLY expect a NoSuchEntityError exception. Anything else is a problem
testResult = "False"
errorMsg = ('Unexpected Error while checking for previously deleted entity! Traceback = %s' % (e) )
errata.append(errorMsg)
#But E4 should remain
try:
e4 = api.getEntity(testEntityID4)
if e4 is None:
testResult = "False"
errorMsg = ('Entity that should not be deleted was!')
errata.append(errorMsg)
except Exception as e:
testResult = "False"
errorMsg = ('Error while checking to see if entity that was not supposed to be deleted is still present! Traceback = %s' % (e) )
errata.append(errorMsg)
#Post delete navigation
try:
#First hops should work
uuid22 = api.getLinkCounterpartsByType(testEntityID1, "Graphyne.Generic")
uuid24 = api.getLinkCounterpartsByType(testEntityID5, "Graphyne.Generic")
if (len(uuid22) == 0) or (len(uuid24) == 0) :
testResult = "False"
errorMsg = ('%sShould be able to navigate between undeleted entities, but can not!\n' %errorMsg)
except Exception as e:
testResult = "False"
        errorMsg = ('%sProblem in post-delete navigation between undeleted entities. Traceback = %s' %(errorMsg, e))
try:
#This should not
uuid25 = api.getLinkCounterpartsByType(testEntityID1, "Graphyne.Generic::Graphyne.Generic::Graphyne.Generic::Graphyne.Generic")
uuid21 = api.getLinkCounterpartsByType(testEntityID5, "Graphyne.Generic::Graphyne.Generic::Graphyne.Generic::Graphyne.Generic")
if (len(uuid25) > 0) or (len(uuid21) > 0) :
testResult = "False"
errorMsg = ('%sShould not be able to navigate full chain and back, but did!\n' %errorMsg)
except: pass
try:
#neither should this
nearestNeighbors = api.getLinkCounterpartsByType(testEntityID2, "*")
if (testEntityID1 not in nearestNeighbors) or (testEntityID4 in nearestNeighbors) :
testResult = "False"
errorMsg = ('%sShould not be able to navigate full chain and back, but did!\n' %errorMsg)
except: pass
testcase = "Deletion"
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testSubatomicLinks():
"""
Test creating and traversing subatomic links
Create 3 entities of type Graphyne.Generic.
"""
method = moduleName + '.' + 'testSubatomicLinks'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
    #Create 3 entities of type Graphyne.Generic.
    #Chain them together: E1 >> E2 (atomic) and E2 >> E3 (subatomic)
try:
testEntityID1 = api.createEntity()
testEntityID2 = api.createEntity()
testEntityID3 = api.createEntity()
api.addEntityLink(testEntityID1, testEntityID2) #Atomic
api.addEntityLink(testEntityID2, testEntityID3, {}, 1) #Subatomic
except Exception as e:
testResult = "False"
errorMsg = ('Error creating entities! Traceback = %s' % (e) )
errata.append(errorMsg)
#Atomic Navigation
try:
uuid12 = api.getLinkCounterpartsByType(testEntityID2, "Graphyne.Generic", 0)
if len(uuid12) != 1:
testResult = "False"
            errorMsg = ('%sError in getLinkCounterpartsByType() while checking for atomic links. Memberlist should return exactly one entry. Actually returned %s members!\n' %(errorMsg, len(uuid12)))
elif uuid12[0] != testEntityID1:
testResult = "False"
            errorMsg = ('%sError in getLinkCounterpartsByType() while checking for atomic links. Wrong cluster sibling returned!\n' %errorMsg)
except Exception as e:
testResult = "False"
errorMsg = ('Error traversing atomic link! Traceback = %s' % (e) )
errata.append(errorMsg)
#SubAtomic Navigation
try:
uuid23 = api.getLinkCounterpartsByType(testEntityID2, "Graphyne.Generic", 1)
if len(uuid23) != 1:
testResult = "False"
            errorMsg = ('%sError in getLinkCounterpartsByType() while checking for subatomic links. Memberlist should return exactly one entry. Actually returned %s members!\n' %(errorMsg, len(uuid23)))
elif uuid23[0] != testEntityID3:
testResult = "False"
            errorMsg = ('%sError in getLinkCounterpartsByType() while checking for subatomic links. Wrong cluster sibling returned!\n' %errorMsg)
except Exception as e:
testResult = "False"
errorMsg = ('Error traversing subatomic link Traceback = %s' % (e) )
errata.append(errorMsg)
#Universal Navigation
try:
uuidBoth = api.getLinkCounterpartsByType(testEntityID2, "Graphyne.Generic")
if len(uuidBoth) != 2:
testResult = "False"
            errorMsg = ('%sError in getLinkCounterpartsByType() while checking for untyped (universal) links. Memberlist should return exactly two entries. Actually returned %s members!\n' %(errorMsg, len(uuidBoth)))
except Exception as e:
testResult = "False"
errorMsg = ('Error traversing link! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = "Subatomic Links"
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testGetClusterMembers():
"""
    Test Getting Cluster Members.
Create 6 entities of type Graphyne.Generic.
Chain four of them together: E1 >> E2 >> E3 >> E4
Connect E4 to a singleton, Examples.MemeA4
Connect E5 to Examples.MemeA4
Connect E3 to E6 via a subatomic link
Check that we can traverse from E1 to E5.
    Get the cluster member list of E3 with linktype = None. It should include E2, E3, E4, E6
    Get the cluster member list of E3 with linktype = 0. It should include E2, E3, E4
    Get the cluster member list of E3 with linktype = 1. It should include E6
    Get the cluster member list of E5. It should be empty
memeStructure = script.getClusterMembers(conditionContainer, 1, False)
"""
method = moduleName + '.' + 'testGetClusterMembers'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
    #Create 6 entities of type Graphyne.Generic and get the Examples.MemeA4 singleton as well.
    #Chain them together: E1 >> E2 >> E3 >> E4 >> Examples.MemeA4 << E5, with E3 >> E6 as a subatomic link
try:
testEntityID1 = api.createEntity()
testEntityID2 = api.createEntity()
testEntityID3 = api.createEntity()
testEntityID4 = api.createEntity()
testEntityID5 = api.createEntity()
testEntityID6 = api.createEntity()
theSingleton = Graph.api.createEntityFromMeme("Examples.MemeA4")
api.addEntityLink(testEntityID1, testEntityID2)
api.addEntityLink(testEntityID2, testEntityID3)
api.addEntityLink(testEntityID3, testEntityID4)
api.addEntityLink(testEntityID3, testEntityID6, {}, 1)
api.addEntityLink(testEntityID4, theSingleton)
api.addEntityLink(testEntityID5, theSingleton)
except Exception as e:
testResult = "False"
errorMsg = ('Error creating entities! Traceback = %s' % (e) )
errata.append(errorMsg)
    #Navigate to end of chain and back
try:
uuid15 = api.getLinkCounterpartsByType(testEntityID1, "Graphyne.Generic::Graphyne.Generic::Graphyne.Generic::Examples.MemeA4::Graphyne.Generic")
uuid11 = api.getLinkCounterpartsByType(uuid15[0], "Examples.MemeA4::Graphyne.Generic::Graphyne.Generic::Graphyne.Generic::Graphyne.Generic")
if (uuid15[0] != testEntityID5) or (uuid11[0] != testEntityID1):
testResult = "False"
            errorMsg = ('%sShould be able to navigate full chain and back before measuring cluster membership, but could not!\n' %errorMsg)
except Exception as e:
testResult = "False"
errorMsg = ('Error measuring cluster membership! Traceback = %s' % (e) )
errata.append(errorMsg)
#From E3, atomic
try:
entityListRaw = api.getClusterMembers(testEntityID3)
entityList1 = []
for entityUUID in entityListRaw:
entityList1.append(entityUUID)
        if testEntityID1 not in entityList1:
            testResult = "False"
            errorMsg = ('%sE1 should be in the atomic link cluster of E3, but is not!\n' %errorMsg)
        if testEntityID2 not in entityList1:
            testResult = "False"
            errorMsg = ('%sE2 should be in the atomic link cluster of E3, but is not!\n' %errorMsg)
        if testEntityID4 not in entityList1:
            testResult = "False"
            errorMsg = ('%sE4 should be in the atomic link cluster of E3, but is not!\n' %errorMsg)
        if theSingleton not in entityList1:
            testResult = "False"
            errorMsg = ('%sExamples.MemeA4 should be in the atomic link cluster of E3, but is not!\n' %errorMsg)
        if len(entityList1) != 4:
            testResult = "False"
            errorMsg = ('%sE3 should have 3 siblings in its atomic link cluster, but it has %s!\n' %(errorMsg, len(entityList1)))
except Exception as e:
testResult = "False"
errorMsg = ('Getting atomic cluster of E3! Traceback = %s' % (e) )
errata.append(errorMsg)
#From E3, subatomic
try:
entityList2 = api.getClusterMembers(testEntityID3, 1)
        if testEntityID6 not in entityList2:
            testResult = "False"
            errorMsg = ('%sE6 should be in the subatomic link cluster of E3, but is not!\n' %errorMsg)
        if len(entityList2) != 1:
            testResult = "False"
            errorMsg = ('%sE3 should have 1 sibling in its subatomic link cluster, but it has %s!\n' %(errorMsg, len(entityList2)))
except Exception as e:
testResult = "False"
errorMsg = ('Getting atomic cluster of E3! Traceback = %s' % (e) )
errata.append(errorMsg)
#From E5, atomic
try:
entityList3 = api.getClusterMembers(testEntityID5)
        if theSingleton not in entityList3:
            testResult = "False"
            errorMsg = ('%sExamples.MemeA4 should be in the atomic link cluster of E5, but is not!\n' %errorMsg)
        if len(entityList3) != 1:
            testResult = "False"
            errorMsg = ('%sE5 should have exactly 1 member (the Examples.MemeA4 singleton) in its atomic link cluster, but it has %s!\n' %(errorMsg, len(entityList3)))
except Exception as e:
testResult = "False"
errorMsg = ('Getting atomic cluster of E5! Traceback = %s' % (e) )
errata.append(errorMsg)
#From E5, subatomic
try:
entityList4 = api.getClusterMembers(testEntityID5)
if len(entityList4) != 1:
testResult = "False"
            errorMsg = ('%sE5 should have 1 sibling in its atomic link cluster, but it has %s!\n' %(errorMsg, len(entityList4)))
except Exception as e:
testResult = "False"
errorMsg = ('Getting atomic cluster of E5! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = "getClusterMembers()"
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testGetHasCounterpartsByType(phaseName = 'getHasCounterpartsByType', fName = "Entity_Phase7.atest"):
'''
Basically a repeat of Phase 7, but with getHasCounterpartsByType()
    Create entities from the memes in the first two columns.
    Add a link between the two at the location on entity 0 given in column 3.
Check and see if each is a counterpart as seen from the other using the addresses in columns 4&5 (CheckPath & Backpath)
& the filter.
The filter must be the same as the type of link (or None)
    The check location must be the same as the added location.
'''
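    # Columns actually read below (whitespace-delimited):
    #   0, 1: memes for entity 0 and entities 1/2
    #   2:    mount point path on entity 0, or 'X' for a direct link
    #   3, 4: check path and back path
    #   5:    link type used when attaching
    #   6:    link type filter for the counterpart check, or 'X' for None
    #   7:    expected result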
method = moduleName + '.' + phaseName
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
lresultSet = []
del lresultSet[:]
#try:
testFileName = os.path.join(testDirPath, fName)
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
    readLoc.close()
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.INFO , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
testResult = False
try:
entityID0 = Graph.api.createEntityFromMeme(stringArray[0])
entityID1 = Graph.api.createEntityFromMeme(stringArray[1])
entityID2 = Graph.api.createEntityFromMeme(stringArray[1])
#Attach entityID1 at the mount point specified in stringArray[2]
if stringArray[2] != "X":
mountPoints = api.getLinkCounterpartsByType(entityID0, stringArray[2], 0)
unusedMountPointsOverview = {}
for mountPoint in mountPoints:
try:
mpMemeType = api.getEntityMemeType(mountPoint)
unusedMountPointsOverview[mountPoint] = mpMemeType
except Exception as e:
#errorMessage = "debugHelperMemeType warning in Smoketest.testEntityPhase7. Traceback = %s" %e
#Graph.logQ.put( [logType , logLevel.WARNING , method , errorMessage])
raise e
for mountPoint in mountPoints:
api.addEntityLink(mountPoint, entityID1, {}, int(stringArray[5]))
else:
api.addEntityLink(entityID0, entityID1, {}, int(stringArray[5]))
backTrackCorrect = False
linkType = None
if stringArray[6] != "X":
linkType = int(stringArray[6])
#see if we can get from entityID0 to entityID1 via stringArray[3]
addLocationCorrect = api.getHasCounterpartsByType(entityID0, stringArray[3], linkType)
#see if we can get from entityID1 to entityID0 via stringArray[4]
backTrackCorrect = api.getHasCounterpartsByType(entityID1, stringArray[4], linkType)
#see if we can get from entityID2 to entityID0 via stringArray[4]
e3Attached = api.getHasCounterpartsByType(entityID2, stringArray[4], linkType)
if (backTrackCorrect == True) and (addLocationCorrect == True) and (e3Attached == False):
testResult = True
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = stringArray[7]
results = [n, testcase, allTrueResult, expectedResult, errata]
lresultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return lresultSet
def testGetEntityMetaMemeType():
"""
    Create a generic meme; one of type Graphyne.Generic.
    Ensure that its metameme is Graphyne.GenericMetaMeme
"""
method = moduleName + '.' + 'testGetEntityMetaMemeType'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = False
expectedResult = "True"
try:
testEntityID = api.createEntity()
metaMemeType = api.getEntityMetaMemeType(testEntityID)
if metaMemeType == "Graphyne.GenericMetaMeme":
operationResult = {"metamemeID" : "Graphyne.GenericMetaMeme", "ValidationResults" : [True, []]}
testResult = "True"
else:
errorMsg = ('Generic Entity Has metameme type = %s' % (metaMemeType) )
operationResult = {"metamemeID" : "Graphyne.GenericMetaMeme", "ValidationResults" : [True, []]}
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"metamemeID" : "Graphyne.GenericMetaMeme", "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = str(operationResult["metamemeID"])
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testInstallExecutor():
"""
    Create a generic meme; one of type Graphyne.Generic.
    Install a Python executor on it and verify that evaluateEntity() dispatches to it.
"""
method = moduleName + '.' + 'testInstallExecutor'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
errorMsg = ""
expectedResult = "True"
try:
testEntityID = api.createEntity()
e2MemeType = api.getEntityMemeType(testEntityID)
from Config.Test.TestRepository import InstallPyExecTest as testMod
testExec = testMod.TestClass(e2MemeType)
api.installPythonExecutor(testEntityID, testExec)
        #The execute() method of testMod.TestClass should return the memeID when called with no runtime parameters
returnVal1 = api.evaluateEntity(testEntityID)
if returnVal1 != e2MemeType:
testResult = "False"
errorMsg = ("%sCalling TestClass.execute() should return %s, but %s was returned instead!\n" %(errorMsg, e2MemeType, returnVal1))
else:
operationResult = {"metamemeID" : "Graphyne.GenericMetaMeme", "ValidationResults" : [True, errorMsg]}
if testResult == "True":
returnVal2 = api.evaluateEntity(testEntityID, {"returnMe" : "Hello World"})
if returnVal2 != "Hello World":
testResult = "False"
errorMsg = ("%sCalling TestClass.execute() with 'returnMe' in runtime parameter keys should return 'Hello World', but %s was returned instead!\n" %(errorMsg, returnVal2))
else:
operationResult = {"metamemeID" : "Graphyne.GenericMetaMeme", "ValidationResults" : [True, []]}
if testResult == "True":
try:
unusedReturnVal3 = api.evaluateEntity(testEntityID, {"thisWontReturnAnything" : "Hello World"})
testResult = "False"
errorMsg = ("%sCalling TestClass.execute() 'thisWontReturnAnything' in runtime parameter keys should return a keyError exception, but %s was returned instead!\n" %(errorMsg, returnVal2))
except Exceptions.EventScriptFailure as e:
#We should have this result
operationResult = {"metamemeID" : "Graphyne.GenericMetaMeme", "ValidationResults" : [True, errorMsg]}
except Exception as e:
testResult = "False"
errorMsg = ('Error! Traceback = %s' % (e) )
operationResult = {"metamemeID" : "Graphyne.GenericMetaMeme", "ValidationResults" : [False, errorMsg]}
errata.append(errorMsg)
testcase = str(operationResult["metamemeID"])
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testGetCluster():
"""
Test Getting Cluster Dictionary.
Create 6 entities of type Graphyne.Generic.
Chain four of them together: E1 >> E2 >> E3 >> E4
Connect E4 to a singleton, Examples.MemeA4
Connect E5 to Examples.MemeA4
Connect E3 to E6 via a subatomic link
Check that we can traverse from E1 to E5.
Get the cluster member list of E3 with linktype = None. It should include E2, E3, E4, E6
Get the cluster member list of E3 with linktype = 0. It should include E2, E3, E4
Get the cluster member list of E3 with linktype = 1. It should include E6
Get the cluster member list of E5. It should be empty
memeStructure = script.getClusterMembers(conditionContainer, 1, False)
"""
    method = moduleName + '.' + 'testGetCluster'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
    #Create 6 entities of type Graphyne.Generic and get the Examples.MemeA4 singleton as well.
    #Chain them together: E1 >> E2 >> E3 >> E4 >> Examples.MemeA4 << E5, with E3 >> E6 as a subatomic link
try:
testEntityID1 = api.createEntity()
testEntityID2 = api.createEntity()
testEntityID3 = api.createEntity()
testEntityID4 = api.createEntity()
testEntityID5 = api.createEntity()
testEntityID6 = api.createEntity()
theSingleton = Graph.api.createEntityFromMeme("Examples.MemeA4")
api.addEntityLink(testEntityID1, testEntityID2)
api.addEntityLink(testEntityID2, testEntityID3)
api.addEntityLink(testEntityID3, testEntityID4)
api.addEntityLink(testEntityID3, testEntityID6, {}, 1)
api.addEntityLink(testEntityID4, theSingleton)
api.addEntityLink(testEntityID5, theSingleton)
except Exception as e:
testResult = "False"
errorMsg = ('Error creating entities! Traceback = %s' % (e) )
errata.append(errorMsg)
    #Navigate to end of chain and back
try:
uuid15 = api.getLinkCounterpartsByType(testEntityID1, ">>Graphyne.Generic>>Graphyne.Generic>>Graphyne.Generic>>Examples.MemeA4<<Graphyne.Generic", None, True)
uuid11 = api.getLinkCounterpartsByType(uuid15[0], "Examples.MemeA4<<Graphyne.Generic<<Graphyne.Generic<<Graphyne.Generic<<Graphyne.Generic", None, True)
if (testEntityID5 not in uuid15) or (testEntityID1 not in uuid11):
testResult = "False"
            errorMsg = ('%sShould be able to navigate full chain and back before measuring cluster membership, but could not!\n' %errorMsg)
except Exception as e:
testResult = "False"
errorMsg = ('Error measuring cluster membership! Traceback = %s' % (e) )
errata.append(errorMsg)
#From E3, atomic
try:
entityListRaw = api.getCluster(testEntityID3)
entityList1 = []
for entityNode in entityListRaw["nodes"]:
entityList1.append(entityNode['id'])
        if str(testEntityID1) not in entityList1:
            testResult = "False"
            errorMsg = ('%sE1 should be in the atomic link cluster of E3, but is not!\n' %errorMsg)
        if str(testEntityID2) not in entityList1:
            testResult = "False"
            errorMsg = ('%sE2 should be in the atomic link cluster of E3, but is not!\n' %errorMsg)
        if str(testEntityID4) not in entityList1:
            testResult = "False"
            errorMsg = ('%sE4 should be in the atomic link cluster of E3, but is not!\n' %errorMsg)
        if str(theSingleton) not in entityList1:
            testResult = "False"
            errorMsg = ('%sExamples.MemeA4 should be in the atomic link cluster of E3, but is not!\n' %errorMsg)
        if len(entityList1) != 5:
            testResult = "False"
            errorMsg = ('%sE3 should have 5 members - itself, 3 generics and the singleton - in its atomic link cluster, but it has %s members!\n' %(errorMsg, len(entityList1)))
except Exception as e:
testResult = "False"
errorMsg = ('Getting atomic cluster of E3! Traceback = %s' % (e) )
errata.append(errorMsg)
#From E3, subatomic
try:
entityListRaw = api.getCluster(testEntityID3, 1)
entityList2 = []
for entityNode in entityListRaw["nodes"]:
entityList2.append(entityNode['id'])
        if str(testEntityID6) not in entityList2:
            testResult = "False"
            errorMsg = ('%sE6 should be in the subatomic link cluster of E3, but is not!\n' %errorMsg)
        if len(entityList2) != 2:
            testResult = "False"
            errorMsg = ('%sE3 should have 2 members in its subatomic link cluster - itself and 1 sibling - but it has %s members!\n' %(errorMsg, len(entityList2)))
except Exception as e:
testResult = "False"
errorMsg = ('Getting atomic cluster of E3! Traceback = %s' % (e) )
errata.append(errorMsg)
#From E5, atomic
try:
entityListRaw = api.getCluster(testEntityID5)
entityList3 = []
for entityNode in entityListRaw["nodes"]:
entityList3.append(entityNode['id'])
        if str(theSingleton) not in entityList3:
            testResult = "False"
            errorMsg = ('%sExamples.MemeA4 should be in the atomic link cluster of E5, but is not!\n' %errorMsg)
        if len(entityList3) != 2:
            testResult = "False"
            errorMsg = ('%sE5 should have 2 members in its atomic link cluster - itself and the Examples.MemeA4 singleton - but the cluster has %s members!\n' %(errorMsg, len(entityList3)))
except Exception as e:
testResult = "False"
errorMsg = ('Getting atomic cluster of E5! Traceback = %s' % (e) )
errata.append(errorMsg)
#From E5, subatomic
try:
entityListRaw = api.getCluster(testEntityID5, 1)
entityList4 = []
for entityNode in entityListRaw["nodes"]:
entityList4.append(entityNode['id'])
        if len(entityList4) != 1:
            testResult = "False"
            errorMsg = ('%sE5 should be alone in its subatomic link cluster, but the cluster has %s members!\n' %(errorMsg, len(entityList4)))
except Exception as e:
testResult = "False"
errorMsg = ('Getting atomic cluster of E5! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = "getClusterMembers()"
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testGetTraverseReport():
"""
Test Getting Traverse Report Dictionary.
    Create 4 entities of type Graphyne.Generic (as in the cluster test).
    Chain 3 of them together: E1 >> E2 >> E3
    Connect E3 to a singleton, Examples.MemeA4
Connect E4 to Examples.MemeA4
Now get the traverse report from E1 to E4.
The traverse report step for E1 should contain E2 and only E2.
The traverse report step for E2 should contain E1 and E3.
The traverse report step for E3 should contain E2 and Examples.MemeA4.
    The traverse report step for E4 should contain Examples.MemeA4 and only Examples.MemeA4.
memeStructure = script.getTraversePathReport(conditionContainer, 1, False)
"""
method = moduleName + '.' + 'testGetTraverseReport'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
    #Create 4 entities of type Graphyne.Generic and get the Examples.MemeA4 singleton as well.
    #Chain them together: E1 >> E2 >> E3 >> Examples.MemeA4 << E4
try:
testEntityID1 = api.createEntity()
testEntityID2 = api.createEntity()
testEntityID3 = api.createEntity()
testEntityID4 = api.createEntity()
theSingleton = Graph.api.createEntityFromMeme("Examples.MemeA4")
api.addEntityLink(testEntityID1, testEntityID2)
api.addEntityLink(testEntityID2, testEntityID3)
api.addEntityLink(testEntityID3, theSingleton)
api.addEntityLink(testEntityID4, theSingleton)
except Exception as e:
testResult = "False"
errorMsg = ('Error creating entities! Traceback = %s' % (e) )
errata.append(errorMsg)
traverseStringByMeme = ">>Graphyne.Generic>>Graphyne.Generic>>Examples.MemeA4<<Graphyne.Generic"
# Reference for debugging assistance
unusedExpectedReport = {testEntityID1 : {"meme" : "Graphyne.Generic",
"metameme" : "Graphyne.GenericMetaMeme",
"members" : {testEntityID2 : {"meme" : "Graphyne.Generic", "metameme" : "Graphyne.GenericMetaMeme"}}},
testEntityID2 : {"meme" : "Graphyne.Generic",
"metameme" : "Graphyne.GenericMetaMeme",
"members" : {testEntityID1 : {"meme" : "Graphyne.Generic", "metameme" : "Graphyne.GenericMetaMeme"},
testEntityID3 : {"meme" : "Graphyne.Generic", "metameme" : "Graphyne.GenericMetaMeme"}}},
testEntityID3 : {"meme" : "Graphyne.Generic",
"metameme" : "Graphyne.GenericMetaMeme",
"members" : {testEntityID2 : {"meme" : "Graphyne.Generic", "metameme" : "Graphyne.GenericMetaMeme"},
theSingleton : {"meme" : "Examples.MemeA4", "metameme" : "Examples.A"}}},
testEntityID4 : {"meme" : "Graphyne.Generic",
"metameme" : "Graphyne.GenericMetaMeme",
"members" : {theSingleton : {"meme" : "Examples.MemeA4", "metameme" : "Examples.A"}}}
}
    #Navigate to end of chain and back
try:
uuid14 = api.getLinkCounterpartsByType(testEntityID1, traverseStringByMeme, None, True)
uuid41 = api.getLinkCounterpartsByType(uuid14[0], "Examples.MemeA4<<Graphyne.Generic<<Graphyne.Generic<<Graphyne.Generic", None, True)
if (testEntityID4 not in uuid14) or (testEntityID1 not in uuid41):
testResult = "False"
errorMsg = ('%sShould be able to navigate the full chain and back before getting the traverse report, but could not!\n' %(errorMsg))
except Exception as e:
testResult = "False"
errorMsg = ('Error reporting on traverse path! Traceback = %s' % (e) )
errata.append(errorMsg)
#From E1, atomic, Meme traverse
# def getTraverseReport(self, entityUUID, traversePath, isMeme = True, linkType = None, returnUniqueValuesOnly = True):
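# Illustrative note (an assumption based on how the assertions below consume the
# result, not on documented API behavior): the report returned by
# api.getTraverseReport() is treated as a dict of the form
#   {"nodes": [{"id": "<uuid-string>", ...}, ...],
#    "links": [{"source": "<uuid-string>", "target": "<uuid-string>", ...}, ...]}
# i.e. the same node/link shape consumed by the cluster tests above.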
try:
reportRaw = api.getTraverseReport(testEntityID1, traverseStringByMeme)
except Exception as e:
testResult = "False"
errorMsg = ('Error getting traverse report from E1! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = "getTraverseReport()"
reportNodes = reportRaw["nodes"]
reportLinks = reportRaw["links"]
e1ID = str(testEntityID1)
e2ID = str(testEntityID2)
e3ID = str(testEntityID3)
e4ID = str(testEntityID4)
eSID = str(theSingleton)
traverseNodeKeyList = []
for reportNode in reportNodes:
traverseNodeKeyList.append(reportNode["id"])
if e1ID not in traverseNodeKeyList:
testResult = "False"
if e2ID not in traverseNodeKeyList:
testResult = "False"
if e3ID not in traverseNodeKeyList:
testResult = "False"
if e4ID not in traverseNodeKeyList:
testResult = "False"
if eSID not in traverseNodeKeyList:
testResult = "False"
tempTraverse12Found = False #Entity 1 should be the parent of 2, but not be connected to 3 or 4
tempTraverse23Found = False
tempTraverse3SFound = False #The singleton should be connected to 3 and 4, but not to 1 or 2.
tempTraverseS4Found = False
for traverseLink in reportLinks:
if (traverseLink["source"] == e1ID) and (traverseLink["target"] == e2ID):
tempTraverse12Found = True
if (traverseLink["source"] == e2ID) and (traverseLink["target"] == e3ID):
tempTraverse23Found = True
if (traverseLink["source"] == e3ID) and (traverseLink["target"] == eSID):
tempTraverse3SFound = True
if (traverseLink["source"] == e4ID) and (traverseLink["target"] == eSID):
tempTraverseS4Found = True
# 1 >> 2, but not 1 << 2
# 2 >> 3 and 1 >> 2, but not 2 << 3
# 3 >> S and S << 4, but not 3 << S or S >> 4
if (traverseLink["source"] == e2ID) and (traverseLink["target"] == e1ID):
testResult = False
if (traverseLink["source"] == e3ID) and (traverseLink["target"] == e2ID):
testResult = False #wrong link
if (traverseLink["source"] == eSID) and (traverseLink["target"] == e3ID):
testResult = False #wrong link
if (traverseLink["source"] == eSID) and (traverseLink["target"] == e4ID):
testResult = False
if (traverseLink["source"] == eSID) and (traverseLink["target"] == e4ID):
testResult = False
if (traverseLink["source"] == eSID) and (traverseLink["target"] == e1ID):
testResult = False
if (traverseLink["source"] == e1ID) and (traverseLink["target"] == e3ID):
testResult = False
if (tempTraverse12Found == False) or\
(tempTraverse23Found == False) or\
(tempTraverse3SFound == False) or\
(tempTraverseS4Found == False):
testResult = False
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testPropertyChangeEvent():
"""
Create an entity from PropertyChangeEvent.PropChangeTest.
It starts with:
propA = 11 ( has an event script. Returns a hash "<oldVal> <newVal>" )
propB = xyz ( has an event script. Returns the entity UUID)
propC = abc ( no SES)
1 - Alter its prop A to an allowed value. Verify the value of the return.
2 - Alter its prop A a second time (to an allowed value) and verify.
3 - Alter prop B and check that the returned UUID is correct.
4 - Alter prop C to a disallowed value. Verify that return is None
"""
method = moduleName + '.' + 'testPropertyChangeEvent'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
#Create a single entity from PropertyChangeEvent.PropChangeTest.
try:
theEntity = Graph.api.createEntityFromMeme("PropertyChangeEvent.PropChangeTest")
except Exception as e:
testResult = "False"
errorMsg = ('Error creating entities! Traceback = %s' % (e) )
errata.append(errorMsg)
#Alter its prop A to an allowed value. Verify the value of the return.
try:
expectedReturnValue = "11 12"
returnValue = api.setEntityPropertyValue(theEntity, "propA", 12)
if returnValue != expectedReturnValue:
testResult = "False"
errorMsg = ('%sSetting the value of propA from 11 to 12 should return "%s" in the return value of the property change event. "%s returned" !\n' %(errorMsg, expectedReturnValue, returnValue))
except Exception as e:
testResult = "False"
errorMsg = ('Error setting value of propA! Traceback = %s' % (e) )
errata.append(errorMsg)
#Alter its prop A a second time (to an allowed value) and verify.
try:
expectedReturnValue = "12 15"
returnValue = api.setEntityPropertyValue(theEntity, "propA", 15)
if returnValue != expectedReturnValue:
testResult = "False"
errorMsg = ('%sSetting the value of propA from 12 to 15 should return "%s" in the return value of the property change event. "%s returned" !\n' %(errorMsg, expectedReturnValue, returnValue))
except Exception as e:
testResult = "False"
errorMsg = ('Error setting value of propA! Traceback = %s' % (e) )
errata.append(errorMsg)
#Alter prop B and check that the returned UUID is correct.
try:
returnValue = api.setEntityPropertyValue(theEntity, "propB", 'abc')
if returnValue != str(theEntity):
testResult = "False"
errorMsg = ('%sSetting the value of propB should return "%s" in the return value of the property change event. "%s returned" !\n' %(errorMsg, theEntity, returnValue))
except Exception as e:
testResult = "False"
errorMsg = ('Error setting value of propB! Traceback = %s' % (e) )
errata.append(errorMsg)
#Alter prop C to a disallowed value. Verify that return is None
try:
returnValue = api.setEntityPropertyValue(theEntity, "propC", 'xyz')
if returnValue != None:
testResult = "False"
errorMsg = ('%sSetting the value of propC (which has no property change event script) should return "%s". "%s" returned!\n' %(errorMsg, None, returnValue))
except Exception as e:
testResult = "False"
errorMsg = ('Error setting value of propC! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = "propertyChangeEvent()"
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testLinkEvent():
"""
Create two entities from LinkEvent.LinkChangeTest.
Create three generic entities.
1 - Link a LinkEvent.LinkChangeTest entity with a generic one, with LinkChangeTest as the source
2 - Break the link
3 - Link the two with LinkChangeTest as the target
4 - Link two of the generics
5 - Break the link
6 - Link the two LinkEvent.LinkChangeTest entities
7 - Break the link
"""
method = moduleName + '.' + 'testLinkEvent'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
#Create two entities from LinkEvent.LinkChangeTest.
#Create three generic entities
try:
linkChangeTest0 = Graph.api.createEntityFromMeme("LinkEvent.LinkChangeTest")
linkChangeTest1 = Graph.api.createEntityFromMeme("LinkEvent.LinkChangeTest")
genEntity0 = Graph.api.createEntity()
genEntity1 = Graph.api.createEntity()
genEntity2 = Graph.api.createEntity()
except Exception as e:
testResult = "False"
errorMsg = ('Error creating entities! Traceback = %s' % (e) )
errata.append(errorMsg)
#1 - Link a LinkEvent.LinkChangeTest entity with a generic one, with LinkChangeTest as the source
try:
expectedReturnValue10 = "Added %s as link source for %s" %(linkChangeTest0, genEntity0)
returnArray = api.addEntityLink(linkChangeTest0, genEntity0)
if returnArray[0] != expectedReturnValue10:
testResult = "False"
errorMsg = '%sAdding link to LinkEvent.LinkChangeTest should return "%s" in the return value [0] of the link added event. "%s returned" !\n' %(errorMsg, expectedReturnValue10, returnArray[0])
if returnArray[1] is not None:
testResult = "False"
errorMsg = '%sAdding link to LinkEvent.LinkChangeTest should return "%s" in the return value [1] of the link added event. "%s returned" !\n' %(errorMsg, None, returnArray[1])
except Exception as e:
testResult = "False"
errorMsg = ('Error adding link! Traceback = %s' % (e) )
errata.append(errorMsg)
#2 - Break the link
try:
expectedReturnValue20 = "Removed %s as link source for %s" %(linkChangeTest0, genEntity0)
returnArray = api.removeEntityLink(linkChangeTest0, genEntity0)
if returnArray[0] != expectedReturnValue20:
testResult = "False"
errorMsg = '%sRemoving link to LinkEvent.LinkChangeTest should return "%s" in the return value [0] of the link removed event. "%s returned" !\n' %(errorMsg, expectedReturnValue20, returnArray[0])
if returnArray[1] is not None:
testResult = "False"
errorMsg = '%sRemoving link to LinkEvent.LinkChangeTest should return "%s" in the return value [1] of the link removed event. "%s returned" !\n' %(errorMsg, None, returnArray[1])
except Exception as e:
testResult = "False"
errorMsg = ('Error removing link! Traceback = %s' % (e) )
errata.append(errorMsg)
#3 - Link the two with LinkChangeTest as the target
try:
expectedReturnValue30 = "Added %s as link target for %s" %(linkChangeTest0, genEntity0)
returnArray = api.addEntityLink(genEntity0, linkChangeTest0)
if returnArray[1] != expectedReturnValue30:
testResult = "False"
errorMsg = '%sAdding link to LinkEvent.LinkChangeTest should return "%s" in the return value [1] of the link added event. "%s returned" !\n' %(errorMsg, expectedReturnValue30, returnArray[1])
if returnArray[0] is not None:
testResult = "False"
errorMsg = '%sAdding link to LinkEvent.LinkChangeTest should return "%s" in the return value [0] of the link added event. "%s returned" !\n' %(errorMsg, None, returnArray[0])
except Exception as e:
testResult = "False"
errorMsg = ('Error adding link! Traceback = %s' % (e) )
errata.append(errorMsg)
#4 - Link two generics
try:
returnArray = api.addEntityLink(genEntity1, genEntity2)
if returnArray[0] is not None:
testResult = "False"
errorMsg = '%sAdding link to generic entity should return "%s" in the return value [0] of the link added event. "%s returned" !\n' %(errorMsg, None, returnArray[0])
if returnArray[1] is not None:
testResult = "False"
errorMsg = '%sAdding link to generic entity should return "%s" in the return value [1] of the link added event. "%s returned" !\n' %(errorMsg, None, returnArray[1])
except Exception as e:
testResult = "False"
errorMsg = ('Error adding link! Traceback = %s' % (e) )
errata.append(errorMsg)
#5 - Break the link
try:
returnArray = api.removeEntityLink(genEntity1, genEntity2)
if returnArray[0] is not None:
testResult = "False"
errorMsg = '%sRemoving link from generic entity should return "%s" in the return value [0] of the link removed event. "%s returned" !\n' %(errorMsg, None, returnArray[0])
if returnArray[1] is not None:
testResult = "False"
errorMsg = '%sRemoving link from generic entity should return "%s" in the return value [1] of the link removed event. "%s returned" !\n' %(errorMsg, None, returnArray[1])
except Exception as e:
testResult = "False"
errorMsg = ('Error removing link! Traceback = %s' % (e) )
errata.append(errorMsg)
#6 - Link the two LinkEvent.LinkChangeTest entities
try:
expectedReturnValue60 = "Added %s as link source for %s" %(linkChangeTest0, linkChangeTest1)
expectedReturnValue61 = "Added %s as link target for %s" %(linkChangeTest1, linkChangeTest0)
returnArray = api.addEntityLink(linkChangeTest0, linkChangeTest1)
if returnArray[0] != expectedReturnValue60:
testResult = "False"
errorMsg = '%sAdding link to LinkEvent.LinkChangeTest should return "%s" in the return value [0] of the link added event. "%s returned" !\n' %(errorMsg, expectedReturnValue60, returnArray[0])
if returnArray[1] != expectedReturnValue61:
testResult = "False"
errorMsg = '%sAdding link to LinkEvent.LinkChangeTest should return "%s" in the return value [1] of the link added event. "%s returned" !\n' %(errorMsg, expectedReturnValue61, returnArray[1])
except Exception as e:
testResult = "False"
errorMsg = ('Error adding link! Traceback = %s' % (e) )
errata.append(errorMsg)
#7 - Break the link
try:
expectedReturnValue70 = "Removed %s as link source for %s" %(linkChangeTest0, linkChangeTest1)
expectedReturnValue71 = "Removed %s as link target for %s" %(linkChangeTest1, linkChangeTest0)
returnArray = api.removeEntityLink(linkChangeTest0, linkChangeTest1)
if returnArray[0] != expectedReturnValue70:
testResult = "False"
errorMsg = '%sRemoving link from LinkEvent.LinkChangeTest should return "%s" in the return value [0] of the link removed event. "%s returned" !\n' %(errorMsg, expectedReturnValue70, returnArray[0])
if returnArray[1] != expectedReturnValue71:
testResult = "False"
errorMsg = '%sRemoving link from LinkEvent.LinkChangeTest should return "%s" in the return value [1] of the link removed event. "%s returned" !\n' %(errorMsg, expectedReturnValue71, returnArray[1])
except Exception as e:
testResult = "False"
errorMsg = ('Error removing link! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = "testLinkEvent()"
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testBrokenEvents():
"""
This method tests the SES event handling of broken scripts
The first series of tests (execute) runs with a SES script that:
1- causes an uncaught KeyError exception
2- causes the same KeyError exception, but catches and actively raises it (as an exception)
3 - The SES script class has no execute() method
The second series of tests (propertyChanged) runs with a SES script that:
1- causes an uncaught KeyError exception
2- causes the same KeyError exception, but catches and actively raises it (as an exception)
The third series of tests (linkAdd) runs with a SES script that:
1- causes an uncaught KeyError exception
2- causes the same KeyError exception, but catches and actively raises it (as an exception)
The fourth series of tests (linkRemove) runs with a SES script that:
1- causes an uncaught KeyError exception
2- causes the same KeyError exception, but catches and actively raises it (as an exception)
"""
method = moduleName + '.' + 'testBrokenEvents'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
#Create the three deliberately broken test entities from the EventFailure package.
try:
entity0 = Graph.api.createEntityFromMeme("EventFailure.BrokenLinkChangeTest")
entity1 = Graph.api.createEntityFromMeme("EventFailure.ThrowsLinkChangeTest")
entity2 = Graph.api.createEntityFromMeme("EventFailure.MalformedEvent")
except Exception as e:
testResult = "False"
errorMsg = ('Error creating entities! Traceback = %s' % (e) )
errata.append(errorMsg)
#execute for all.
try:
unusedReturnvalue = api.evaluateEntity(entity0)
#yes, in this testcase, valid tests throw exceptions
testResult = "False"
errorMsg = ('Error. execute event for EventFailure.BrokenLinkChangeTest should raise an Exceptions.EventScriptFailure exception, but did not!')
errata.append(errorMsg)
except Exceptions.EventScriptFailure as e:
pass
except Exception as e:
testResult = "False"
erorMessage = ('Error. execute event for EventFailure.BrokenLinkChangeTest should raise an Exceptions.EventScriptFailure exception, but raised a different exception!')
fullerror = sys.exc_info()
errorMsg = str(fullerror[1])
tb = sys.exc_info()[2]
erorMessage = "%s Traceback = %s %s" %(erorMessage, errorMsg, tb)
errata.append(erorMessage)
try:
unusedReturnvalue = api.evaluateEntity(entity1)
testResult = "False"
errorMsg = ('Error. execute event for EventFailure.ThrowsLinkChangeTest should raise an exception, but did not!')
errata.append(errorMsg)
except Exceptions.EventScriptFailure as e:
pass
except Exception as e:
testResult = "False"
erorMessage = ('Error. execute event for EventFailure.ThrowsLinkChangeTest should raise an Exceptions.EventScriptFailure exception, but raised a different exception!')
fullerror = sys.exc_info()
errorMsg = str(fullerror[1])
tb = sys.exc_info()[2]
erorMessage = "%s Traceback = %s %s" %(erorMessage, errorMsg, tb)
errata.append(erorMessage)
try:
unusedReturnvalue = api.evaluateEntity(entity2)
testResult = "False"
errorMsg = ('Error. execute event for EventFailure.MalformedEvent should raise an exception, but did not!')
errata.append(errorMsg)
except Exceptions.EventScriptFailure as e:
pass
except Exception as e:
testResult = "False"
erorMessage = ('Error. execute event for EventFailure.MalformedEvent should raise an Exceptions.EventScriptFailure exception, but raised a different exception!')
fullerror = sys.exc_info()
errorMsg = str(fullerror[1])
tb = sys.exc_info()[2]
erorMessage = "%s Traceback = %s %s" %(erorMessage, errorMsg, tb)
errata.append(erorMessage)
#propertyChanged.
try:
unusedReturnvalue = api.setEntityPropertyValue(entity0, "propB", "abc")
#yes, in this testcase, valid tests throw exceptions
testResult = "False"
errorMsg = ('Error. propertyChanged event for EventFailure.BrokenLinkChangeTest should raise an exception, but did not!')
errata.append(errorMsg)
except Exceptions.EventScriptFailure as e:
pass
except Exception as e:
testResult = "False"
erorMessage = ('Error. propertyChanged event for EventFailure.BrokenLinkChangeTest should raise an Exceptions.EventScriptFailure exception, but raised a different exception!')
fullerror = sys.exc_info()
errorMsg = str(fullerror[1])
tb = sys.exc_info()[2]
erorMessage = "%s Traceback = %s %s" %(erorMessage, errorMsg, tb)
errata.append(erorMessage)
try:
unusedReturnvalue = api.setEntityPropertyValue(entity1, "propB", "abc")
testResult = "False"
errorMsg = ('Error. propertyChanged event for EventFailure.ThrowsLinkChangeTest should raise an exception, but did not!')
errata.append(errorMsg)
except Exceptions.EventScriptFailure as e:
pass
except Exception as e:
testResult = "False"
erorMessage = ('Error. propertyChanged event for EventFailure.ThrowsLinkChangeTest should raise an Exceptions.EventScriptFailure exception, but raised a different exception!')
fullerror = sys.exc_info()
errorMsg = str(fullerror[1])
tb = sys.exc_info()[2]
erorMessage = "%s Traceback = %s %s" %(erorMessage, errorMsg, tb)
errata.append(erorMessage)
#linkAdd
try:
unusedReturnvalue = api.addEntityLink(entity0, entity1)
#yes, in this testcase, valid tests throw exceptions
testResult = "False"
errorMsg = ('Error. linkAdd event for EventFailure.BrokenLinkChangeTest should raise an exception, but did not!')
errata.append(errorMsg)
except Exceptions.EventScriptFailure as e:
pass
except Exception as e:
testResult = "False"
erorMessage = ('Error. linkAdd event for EventFailure.BrokenLinkChangeTest should raise an Exceptions.EventScriptFailure exception, but raised a different exception!')
fullerror = sys.exc_info()
errorMsg = str(fullerror[1])
tb = sys.exc_info()[2]
erorMessage = "%s Traceback = %s %s" %(erorMessage, errorMsg, tb)
errata.append(erorMessage)
#linkRemove
try:
unusedReturnvalue = api.removeEntityLink(entity0, entity1)
#yes, in this testcase, valid tests throw exceptions
testResult = "False"
errorMsg = ('Error. linkRemove event for EventFailure.BrokenLinkChangeTest should raise an exception, but did not!')
errata.append(errorMsg)
except Exceptions.EventScriptFailure as e:
pass
except Exception as e:
testResult = "False"
erorMessage = ('Error. linkRemove event for EventFailure.BrokenLinkChangeTest should raise an Exceptions.EventScriptFailure exception, but raised a different exception!')
fullerror = sys.exc_info()
errorMsg = str(fullerror[1])
erorMessage = "%s Traceback = %s" %(erorMessage, errorMsg)
tb = sys.exc_info()[2]
#raise Exceptions.EventScriptFailure(errorMsg).with_traceback(tb)
errata.append(erorMessage)
testcase = "testLinkEvent()"
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testInitializeEvent():
"""
Create one entity from EventInitRemove.InitRemoveEventTest.
1 - Check that it has an AProp property and that its value is 'Hello'
The meme has no properties, but the initialize event script adds the AProp property
"""
method = moduleName + '.' + 'testInitializeEvent'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
#Create one entity from EventInitRemove.InitRemoveEventTest.
try:
theEntity = Graph.api.createEntityFromMeme("EventInitRemove.InitRemoveEventTest")
except Exception as e:
testResult = "False"
errorMsg = ('Error creating entity! Traceback = %s' % (e) )
errata.append(errorMsg)
#1 - Check that the entity has an AProp property whose value is 'Hello'
try:
returnValue = api.getEntityPropertyValue(theEntity, "AProp")
if returnValue != "Hello":
testResult = "False"
errorMsg = 'The initialize event script, EventInitRemove.OnInitialize, should add a property called AProp to EventInitRemove.InitRemoveEventTest and its value should be "Hello". It is actually "%s"!\n' %(returnValue)
except Exception as e:
testResult = "False"
errorMsg = ('Error in initialize event script! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = "testInitializeEvent()"
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testRemoveEvent():
"""
Locate the EventInitRemove.InitRemoveEventTest entity created in testInitializeEvent(). (There should be exactly one.)
Delete it.
1 - Check that the terminate event script's return value is 'Hello World'
"""
method = moduleName + '.' + 'testRemoveEvent'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
#Locate the single EventInitRemove.InitRemoveEventTest entity created in testInitializeEvent().
try:
theEntities = Graph.api.getEntitiesByMemeType("EventInitRemove.InitRemoveEventTest")
if len(theEntities) != 1:
testResult = "False"
errorMsg = 'One EventInitRemove.InitRemoveEventTest entity was created in the graph, during testInitializeEvent(). There can be only one! There are actually %s ' %(len(theEntities))
else:
theEntity = theEntities[0]
except Exception as e:
testResult = "False"
errorMsg = ('Error locating entity! Traceback = %s' % (e) )
errata.append(errorMsg)
#1 - Destroy the entity and verify the terminate event script's return value
try:
destroyReturn = api.destroyEntity(theEntity)
if destroyReturn != "Hello World":
testResult = "False"
errorMsg = 'The terminate event script, EventInitRemove.OnDelete, should return "Hello World". It actually returned "%s" !\n' %(destroyReturn)
except Exception as e:
testResult = "False"
errorMsg = ('Error in terminate event script! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = "testRemoveEvent()"
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testAtomicSubatomic():
"""
Test atomic/subatomic links defined in memes.
"""
method = moduleName + '.' + 'testAtomicSubatomic'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
resultSet = []
errata = []
testResult = "True"
expectedResult = "True"
errorMsg = ""
#The testcase entities
try:
parentMeme1 = Graph.api.createEntityFromMeme("AtomicSubatomic.ParentMeme1") #Both child entities have subatomic links
parentMeme2 = Graph.api.createEntityFromMeme("AtomicSubatomic.ParentMeme2") #One child has an atomic link, the other subatomic
parentMeme3 = Graph.api.createEntityFromMeme("AtomicSubatomic.ParentMeme3") #Both child entities have atomic links
except Exception as e:
testResult = "False"
errorMsg = ('Error creating test entities! Traceback = %s' % (e) )
errata.append(errorMsg)
try:
pm1aChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme1, "AtomicSubatomic.ChildMM", linkTypes.ATOMIC)
pm1sChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme1, "AtomicSubatomic.ChildMM", linkTypes.SUBATOMIC)
if len(pm1sChildren) < 2:
testResult = "False"
errorMsg = "Meme AtomicSubatomic.ParentMeme1 should have two subatomic children. It actually has %s\n" %(len(pm1sChildren))
if len(pm1aChildren) > 0:
testResult = "False"
errorMsg = "Meme AtomicSubatomic.ParentMeme1 should have no atomic children. It actually has %s\n" %(len(pm1aChildren))
except Exception as e:
testResult = "False"
errorMsg = ('Error when searching for children of AtomicSubatomic.ParentMeme1! Traceback = %s' % (e) )
errata.append(errorMsg)
try:
pm2aChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme2, "AtomicSubatomic.ChildMM", linkTypes.ATOMIC)
pm2sChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme2, "AtomicSubatomic.ChildMM", linkTypes.SUBATOMIC)
if len(pm2sChildren) != 1:
testResult = "False"
errorMsg = "Meme AtomicSubatomic.ParentMeme2 should have one subatomic child. It actually has %s\n" %(len(pm2sChildren))
if len(pm2aChildren) != 1:
testResult = "False"
errorMsg = "Meme AtomicSubatomic.ParentMeme2 should have one atomic child. It actually has %s\n" %(len(pm2aChildren))
except Exception as e:
testResult = "False"
errorMsg = ('Error when searching for children of AtomicSubatomic.ParentMeme2! Traceback = %s' % (e) )
errata.append(errorMsg)
try:
pm3aChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme3, "AtomicSubatomic.ChildMM", linkTypes.ATOMIC)
pm3sChildren = api.getLinkCounterpartsByMetaMemeType(parentMeme3, "AtomicSubatomic.ChildMM", linkTypes.SUBATOMIC)
if len(pm3sChildren) > 0:
testResult = "False"
errorMsg = "Meme AtomicSubatomic.ParentMeme1 should have no subatomic children. It actually has %s\n" %(len(pm3sChildren))
if len(pm3aChildren) < 2:
testResult = "False"
errorMsg = "Meme AtomicSubatomic.ParentMeme1 should have two atomic children. It actually has %s\n" %(len(pm3aChildren))
except Exception as e:
testResult = "False"
errorMsg = ('Error when searching for children of AtomicSubatomic.ParentMeme3! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = "testAtomicSubatomic()"
results = [1, testcase, testResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(1)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
######################
#End Test Block
#####################
def getResultPercentage(resultSet):
#results = [n, testcase, allTrueResult, expectedResult, errata]
totalTests = len(resultSet)
if totalTests == 0:
return 0
else:
partialResult = 0
if totalTests > 0:
for test in resultSet:
try:
if test[2].upper() == test[3].upper():
partialResult = partialResult + 1
except Exception as e:
print(e)
pp = partialResult/totalTests
resultPercentage = pp * 100
return int(resultPercentage)
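# Example (illustrative): three test rows where two match their expected result
# score int((2/3) * 100) == 66.
#   getResultPercentage([[1, "caseA", "True", "True", []],
#                        [2, "caseB", "False", "True", []],
#                        [3, "caseC", "True", "True", []]])   # -> 66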
def publishResults(testReports, css, fileName, titleText):
#testReport = {"resultSet" : resultSet, "validationTime" : validationTime, "persistence" : persistence.__name__}
#resultSet = [u"Condition (Remote Child)", copy.deepcopy(testSetData), testSetPercentage])
"Every report repeats exactly the same result sets, so we need only count onece"
testCaseCount = 0
exampleTestReport = testReports[0]
exampleResultSet = exampleTestReport["resultSet"]
for testScenario in exampleResultSet:
testCaseCount = testCaseCount + len(testScenario[2])
#Totals for time and number of test cases
numReports = len(testReports)
totalTCCount = testCaseCount * numReports
totalTCTime = 0.0
for countedTestReport in testReports:
totalTCTime = totalTCTime + countedTestReport["validationTime"]
# Create the minidom document
doc = minidom.Document()
# Create the <html> base element
html = doc.createElement("html")
# Create the <head> element
head = doc.createElement("head")
style = doc.createElement("style")
defaultCSS = doc.createTextNode(css)
style.appendChild(defaultCSS)
title = doc.createElement("title")
titleTextNode = doc.createTextNode(titleText)
title.appendChild(titleTextNode)
head.appendChild(style)
head.appendChild(title)
body = doc.createElement("body")
h1 = doc.createElement("h1")
h1Text = doc.createTextNode(titleText)
h1.appendChild(h1Text)
body.appendChild(h1)
h2 = doc.createElement("h2")
h2Text = doc.createTextNode("%s regression tests over %s persistence types in %.1f seconds: %s" %(totalTCCount, numReports, totalTCTime, ctime()))
h2.appendChild(h2Text)
body.appendChild(h2)
h3 = doc.createElement("h2")
h3Text = doc.createTextNode("Entity Count at start of tests: %s" %(exampleTestReport["entityCount"]))
h3.appendChild(h3Text)
body.appendChild(h3)
"""
The Master table wraps all the result sets.
masterTableHeader contains all of the overview blocks
masterTableBody contains all of the detail elements
"""
masterTable = doc.createElement("table")
masterTableHeader = doc.createElement("table")
masterTableBody = doc.createElement("table")
for testReport in testReports:
masterTableHeaderRow = doc.createElement("tr")
masterTableBodyRow = doc.createElement("tr")
localValTime = testReport["validationTime"]
localPersistenceName = testReport["persistence"]
resultSet = testReport["resultSet"]
profileName = testReport["profileName"]
#Module Overview
numberOfColumns = 1
numberOfModules = len(resultSet)
if numberOfModules > 6:
numberOfColumns = 2
if numberOfModules > 12:
numberOfColumns = 3
if numberOfModules > 18:
numberOfColumns = 4
if numberOfModules > 24:
numberOfColumns = 5
rowsPerColumn = numberOfModules//numberOfColumns + 1
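# Overview layout example: 13 result modules -> 3 columns (since 13 > 12) -> 13//3 + 1 = 5 rows per column.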
listPosition = 0
icTable = doc.createElement("table")
icTableHead= doc.createElement("thead")
icTableHeadText = doc.createTextNode("%s, %s: %.1f seconds" %(profileName, localPersistenceName, localValTime) )
icTableHead.appendChild(icTableHeadText)
icTableHead.setAttribute("class", "tableheader")
icTable.appendChild(icTableHead)
icTableFoot= doc.createElement("tfoot")
icTableFootText = doc.createTextNode("Problem test case sets are detailed in tables below" )
icTableFoot.appendChild(icTableFootText)
icTable.appendChild(icTableFoot)
icTableRow = doc.createElement("tr")
for unusedI in range(0, numberOfColumns):
bigCell = doc.createElement("td")
nestedTable = doc.createElement("table")
#Header
headers = ["", "Tests", "Valid"]
nestedTableHeaderRow = doc.createElement("tr")
for headerElement in headers:
nestedCell = doc.createElement("th")
nestedCellText = doc.createTextNode("%s" %headerElement)
nestedCell.appendChild(nestedCellText)
nestedTableHeaderRow.appendChild(nestedCell)
#nestedTableHeaderRow.setAttribute("class", "tableHeaderRow")
nestedTable.appendChild(nestedTableHeaderRow)
for dummyJ in range(0, rowsPerColumn):
currPos = listPosition
listPosition = listPosition + 1
if listPosition <= numberOfModules:
try:
moduleReport = resultSet[currPos]
#Write Data Row To Table
row = doc.createElement("tr")
#Module Name is first cell
cell = doc.createElement("td")
cellText = doc.createTextNode("%s" %moduleReport[0])
hyperlinkNode = doc.createElement("a")
hyperlinkNode.setAttribute("href", "#%s%s" %(moduleReport[0], localPersistenceName))
hyperlinkNode.appendChild(cellText)
cell.appendChild(hyperlinkNode)
if moduleReport[1] < 100:
row.setAttribute("class", "badOverviewRow")
else:
row.setAttribute("class", "goodOverviewRow")
row.appendChild(cell)
rowData = [len(moduleReport[2]), "%s %%" %moduleReport[1]]
for dataEntry in rowData:
percentCell = doc.createElement("td")
percentCellText = doc.createTextNode("%s" %dataEntry)
percentCell.appendChild(percentCellText)
row.appendChild(percentCell)
nestedTable.appendChild(row)
except:
pass
else:
row = doc.createElement("tr")
cell = doc.createElement("td")
cellText = doc.createTextNode("")
cell.appendChild(cellText)
row.appendChild(cell)
nestedTable.appendChild(row)
nestedTable.setAttribute("class", "subdivision")
bigCell.appendChild(nestedTable)
icTableRow.appendChild(bigCell)
icTableDiv = doc.createElement("div")
icTableDiv.setAttribute("class", "vAlignment")
icTableDiv.appendChild(icTableRow)
icTable.appendChild(icTableDiv)
#Add some blank space before icTable
frontSpacer = doc.createElement("div")
frontSpacer.setAttribute("class", "vBlankSpace")
frontSpacer.appendChild(icTable)
masterTableDiv = doc.createElement("div")
masterTableDiv.setAttribute("class", "vAlignment")
masterTableDiv.appendChild(frontSpacer)
masterTableHeaderRow.appendChild(masterTableDiv)
masterTableHeader.appendChild(masterTableHeaderRow)
#Individual Data Sets
for testSet in resultSet:
#first, build up the "outer" table header, which has the header
idHash = "%s%s" %(testSet[0], localPersistenceName)
oTable = doc.createElement("table")
oTable.setAttribute("style", "border-style:solid")
tableHeader= doc.createElement("thead")
tableHeaderText = doc.createTextNode("%s (%s)" %(testSet[0], localPersistenceName) )
tableAnchor = doc.createElement("a")
tableAnchor.setAttribute("id", idHash)
tableAnchor.appendChild(tableHeaderText)
tableHeader.appendChild(tableAnchor)
tableHeader.setAttribute("class", "tableheader")
oTable.appendChild(tableHeader)
oTableRow = doc.createElement("tr")
oTableContainer = doc.createElement("td")
#Inner Table
table = doc.createElement("table")
headers = ["#", "Test Case", "Result", "Expected Result", "Notes"]
tableHeaderRow = doc.createElement("tr")
for headerEntry in headers:
cell = doc.createElement("th")
cellText = doc.createTextNode("%s" %headerEntry)
cell.appendChild(cellText)
cell.setAttribute("class", "tableHeaderRow")
tableHeaderRow.appendChild(cell)
table.appendChild(tableHeaderRow)
for fullTestRow in testSet[2]:
#fullTestRow = [n, testcase, allTrueResult, expectedResult, errata]
test = [fullTestRow[0], fullTestRow[1], fullTestRow[2], fullTestRow[3]]
tableRow = doc.createElement("tr")
for dataEntry in test:
cell = doc.createElement("td")
cellText = doc.createTextNode("%s" %dataEntry)
cell.appendChild(cellText)
cell.setAttribute("class", "detailsCell")
tableRow.appendChild(cell)
try:
if test[2].upper() != test[3].upper():
#then mark the whole row as red
tableRow.setAttribute("class", "badDRow")
else:
tableRow.setAttribute("class", "goodDRow")
except:
cell = doc.createElement("td")
cellText = doc.createTextNode("Please check Testcase code: actual test result = %s, expected = %s" %(test[2], test[3]))
cell.appendChild(cellText)
cell.setAttribute("class", "detailsCell")
tableRow.appendChild(cell)
tableRow.setAttribute("class", "badDRow")
errataCell = doc.createElement("td")
if type(fullTestRow[4]) == type([]):
filteredErrata = Graph.filterListDuplicates(fullTestRow[4])
for bulletpointElement in filteredErrata:
paragraph = doc.createElement("p")
pText = doc.createTextNode("%s" %bulletpointElement)
paragraph.appendChild(pText)
errataCell.appendChild(paragraph)
else:
filteredErrata = Graph.filterListDuplicates(fullTestRow[4])
paragraph = doc.createElement("p")
pText = doc.createTextNode("%s" %filteredErrata)
paragraph.appendChild(pText)
#rowValidityCell.appendChild(paragraph)
errataCell.appendChild(paragraph)
tableRow.appendChild(errataCell)
table.appendChild(tableRow)
oTableContainer.appendChild(table)
oTableRow.appendChild(oTableContainer)
oTable.appendChild(oTableRow)
#Add some blank space before any tables
tableSpacer = doc.createElement("div")
tableSpacer.setAttribute("class", "vBlankSpace")
tableSpacer.appendChild(oTable)
masterTableDivL = doc.createElement("div")
masterTableDivL.setAttribute("class", "vAlignment")
masterTableDivL.appendChild(tableSpacer)
masterTableBodyRow.appendChild(masterTableDivL)
masterTableBody.appendChild(masterTableBodyRow)
masterTable.appendChild(masterTableHeader)
masterTable.appendChild(masterTableBody)
body.appendChild(masterTable)
html.appendChild(head)
html.appendChild(body)
doc.appendChild(html)
fileStream = doc.toprettyxml(indent = " ")
logRoot = expanduser("~")
logDir = os.path.join(logRoot, "Graphyne")
if not os.path.exists(logDir):
os.makedirs(logDir)
resultFileLoc = os.path.join(logDir, fileName)
fileObject = open(resultFileLoc, "w", encoding="utf-8")
#fileObject.write(Fileutils.smart_str(fileStream))
fileObject.write(fileStream)
fileObject.close()
def usage():
print(__doc__)
def runTests(css):
global testImplicit
method = moduleName + '.' + 'main'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
#Make sure that we have a script facade available
global api
api = Graph.api.getAPI()
# A line to prevent pydev from complaining about unused variables
dummyIgnoreThis = str(api)
# a helper item for debugging whether or not a particular entity is in the repo
debugHelperIDs = api.getAllEntities()
for debugHelperID in debugHelperIDs:
try:
debugHelperMemeType = api.getEntityMemeType(debugHelperID)
entityList.append([str(debugHelperID), debugHelperMemeType])
except Exception as unusedE:
#This exception is normally left as a pass. If you need to debug the preceding code, then uncomment the block below.
# The exception is called 'unusedE', so that Pydev will ignore the unused variable
#errorMessage = "debugHelperMemeType warning in Smoketest.Runtests. Traceback = %s" %unusedE
#Graph.logQ.put( [logType , logLevel.WARNING , method , errorMessage])
pass
#test
resultSet = []
print("Meta Meme Properties")
testSetData = testMetaMemeProperty()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Meta Meme Properties", testSetPercentage, copy.deepcopy(testSetData)])
print("Meta Meme Singleton")
testSetData = testMetaMemeSingleton()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Meta Meme Singleton", testSetPercentage, copy.deepcopy(testSetData)])
print("Meta Meme Switch")
testSetData = testMetaMemeSwitch()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Meta Meme Switch", testSetPercentage, copy.deepcopy(testSetData)])
print("Meta Meme Enhancements")
testSetData = testMetaMemeEnhancements()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Meta Meme Enhancements", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testMemeValidity()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Meme Validity", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase1()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 1", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase1_1()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 1.1", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase2()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 2", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase2_1()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 2.1", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase3()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 3", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase3_1()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 3.1", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase4()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 4", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase4_1()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 4.1", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase1('testEntityPhase5', 'Entity_Phase5.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 5", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase1_1('testEntityPhase5.1', 'Entity_Phase5.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 5.1", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase6()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 6", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase6_1()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 6.1", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase7()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 7", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testLinkCounterpartsByMetaMemeType()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 7.1", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase2('testEntityPhase8', 'Entity_Phase8.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 8", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase2_1('testEntityPhase8_1', 'Entity_Phase8.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 8.1", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase9()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 9", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testEntityPhase10()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 10", testSetPercentage, copy.deepcopy(testSetData)])
#Repeats 7, but with directional references
testSetData = testEntityPhase7('testEntityPhase11', "Entity_Phase11.atest")
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Phase 11", testSetPercentage, copy.deepcopy(testSetData)])
#Repeats 7, but with directional reference filters
testSetData = testTraverseParams()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Traverse Params", testSetPercentage, copy.deepcopy(testSetData)])
#NumericValue.atest
testSetData = testNumericValue('NumericValue.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["NumericValue", testSetPercentage, copy.deepcopy(testSetData)])
if (testImplicit == True):
print("Implicit Memes")
testSetData = testImplicitMeme()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Implicit Meme", testSetPercentage, copy.deepcopy(testSetData)])
else:
print("No Persistence: Skipping Implicit Memes")
print("Conditions")
testSetData = testCondition('ConditionSimple.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Condition (Simple)", testSetPercentage, copy.deepcopy(testSetData)])
#ConditionSet.atest
testSetData = testCondition('ConditionSet.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Condition (Set)", testSetPercentage, copy.deepcopy(testSetData)])
# Script Conditions
testSetData = testCondition('ConditionScript.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Condition (Script)", testSetPercentage, copy.deepcopy(testSetData)])
#Child conditions in remote packages
testSetData = testCondition('ConditionRemotePackage.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Condition (Remote Child)", testSetPercentage, copy.deepcopy(testSetData)])
#String and Numeric Conditions with Agent Attributes
testSetData = testAACondition('ConditionAA.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Condition (Agent Attributes)", testSetPercentage, copy.deepcopy(testSetData)])
#String and Numeric Conditions with Multi Agent Attributes
testSetData = testAACondition('ConditionMAA.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Condition (Multi Agent Attributes)", testSetPercentage, copy.deepcopy(testSetData)])
#Creating source metamemes via the script facade
testSetData = testSourceCreateMeme('SourceCreateMeme.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Meme Creation", testSetPercentage, copy.deepcopy(testSetData)])
#Set a source meme property via the script facade
testSetData = testSourceProperty('SourceProperty.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Meme Property Set", testSetPercentage, copy.deepcopy(testSetData)])
#Delete a source meme property via the script facade
testSetData = testSourcePropertyRemove('SourceProperty.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Meme Property Remove", testSetPercentage, copy.deepcopy(testSetData)])
#Add a member meme via the script facade
testSetData = testSourceMember('SourceMember.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Member Meme Add", testSetPercentage, copy.deepcopy(testSetData)])
#Remove a member meme via the script facade
testSetData = testSourceMemberRemove('SourceMember.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Member Meme Remove", testSetPercentage, copy.deepcopy(testSetData)])
#Add an enhancement via the script facade
testSetData = testSourceEnhancement('SourceEnhancement.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Enhancement Add", testSetPercentage, copy.deepcopy(testSetData)])
#Remove an enhancement via the script facade
testSetData = testSourceEnhancementRemove('SourceEnhancement.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Enhancement Remove", testSetPercentage, copy.deepcopy(testSetData)])
#Set the singleton flag via the script facade
testSetData = testSourceSingletonSet('SourceCreateMeme.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Singleton Setting", testSetPercentage, copy.deepcopy(testSetData)])
#Create a Generic entity and check to see that it's meme is Graphyne.Generic
testSetData = testGeneric()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Generic Entity", testSetPercentage, copy.deepcopy(testSetData)])
#Test Entity Deletion
testSetData = testDeleteEntity()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Deletion", testSetPercentage, copy.deepcopy(testSetData)])
#Atomic and subatomic links
testSetData = testSubatomicLinks()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Subatomic Links", testSetPercentage, copy.deepcopy(testSetData)])
#getting the cluster member list
testSetData = testGetClusterMembers()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Cluster Member List", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testGetHasCounterpartsByType()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Has Counterparts by Type", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testGetEntityMetaMemeType()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["API method testGetEntityMetaMemeType", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testInstallExecutor()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["API method testInstallExecutor", testSetPercentage, copy.deepcopy(testSetData)])
#getting the cluster dictionary
testSetData = testGetCluster()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Cluster", testSetPercentage, copy.deepcopy(testSetData)])
#testRevertEntity
testSetData = testRevertEntity()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["API Method revertEntity", testSetPercentage, copy.deepcopy(testSetData)])
#testPropertyChangeEvent
testSetData = testPropertyChangeEvent()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Property Change Event", testSetPercentage, copy.deepcopy(testSetData)])
#testLinkEvent
testSetData = testLinkEvent()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Link Event", testSetPercentage, copy.deepcopy(testSetData)])
#testBrokenEvents
testSetData = testBrokenEvents()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Broken Event", testSetPercentage, copy.deepcopy(testSetData)])
#testLinkEvent
testSetData = testInitializeEvent()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Initialize Event", testSetPercentage, copy.deepcopy(testSetData)])
#testBrokenEvents
testSetData = testRemoveEvent()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Remove Event", testSetPercentage, copy.deepcopy(testSetData)])
#testAtomicSubatomic
testSetData = testAtomicSubatomic()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Atomic and Subatomic", testSetPercentage, copy.deepcopy(testSetData)])
#testGetTraverseReport
testSetData = testGetTraverseReport()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Traverse Report", testSetPercentage, copy.deepcopy(testSetData)])
#endTime = time.time()
#validationTime = endTime - startTime
#publishResults(resultSet, validationTime, css)
return resultSet
#Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
def smokeTestSet(persistence, lLevel, css, profileName, persistenceArg = None, persistenceType = None, resetDatabase = False, createTestDatabase = False, scaleFactor = 0):
'''
repoLocations = a list of all of the filesystem locations that compose the repository.
useDefaultSchema. If True, then load the 'default schema' of Graphyne
persistenceType = The type of database used by the persistence engine. This is used to determine which flavor of SQL syntax to use.
Enumeration of Possible values:
Default to None, which is no persistence
"sqlite" - Sqlite3
"mssql" - Miscrosoft SQL Server
"hana" - SAP Hana
persistenceArg = the Module/class supplied to host the entityRepository and LinkRepository. If default, then use the Graphyne.DatabaseDrivers.NonPersistent module.
Enumeration of possible values:
None - May only be used in conjunction with "sqlite" as persistenceType and will throw an InconsistentPersistenceArchitecture otherwise
"none" - no persistence. May only be used in conjunction with "sqlite" as persistenceType and will throw an InconsistentPersistenceArchitecture otherwise
"memory" - Use SQLite in in-memory mode (connection = ":memory:")
"<valid filename with .sqlite as extension>" - Use SQLite, with that file as the database
"<filename with .sqlite as extension, but no file>" - Use SQLite and create that file to use as the DB file
"<anything else>" - Presume that it is a pyodbc connection string and throw a InconsistentPersistenceArchitecture exception if the dbtype is "sqlite".
createTestDatabase = a flag for creating regression test data. This flag is only to be used for regression testing the graph and even then, only if the test
database does not already exist.
scaleFactor = Scale factor (S). Given N non-singleton memes, N*S "ballast" entities will be created in the DB before starting the test suite. This allows us
to use larger datasets to test scalability (at least with regards to entity repository size)
*If persistenceType is None (no persistence), then this is ignored and won't throw any InconsistentPersistenceArchitecture exceptions
'''
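# Example wiring (illustrative only; the __main__ block at the bottom of this
# module shows the canonical calls and argument order):
#   from graphyne.DatabaseDrivers import RelationalDatabase
#   report = smokeTestSet(RelationalDatabase, Graph.logLevel.WARNING, Fileutils.defaultCSS(),
#                         "sqlite", "memory", "sqlite", True, True)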
global testImplicit
print(("\nStarting Graphyne Smoke Test: %s") %(persistence.__name__))
print(("...%s: Engine Start") %(persistence.__name__))
#Only test implicit memes in the case that we are using persistence
if persistenceType is None:
testImplicit = False
#Don't validate the repo when we are performance testing
if scaleFactor < 1:
validateOnLoad = True
else:
validateOnLoad = False
time.sleep(10.0)
installFilePath = os.path.dirname(__file__)
testRepo = os.path.join(installFilePath, "Config", "Test", "TestRepository")
#mainAngRepo = os.path.join(os.environ['ANGELA_HOME'], "RMLRepository")
try:
Graph.startLogger(lLevel)
Graph.startDB([testRepo], persistenceType, persistenceArg, True, resetDatabase, True, validateOnLoad)
except Exception as e:
print(("Graph not started. Traceback = %s" %e))
raise e
print(("...Engine Started: %s") %persistence.__name__)
time.sleep(30.0)
print(("...%s: Engine Started") %(persistence.__name__))
#If scaleFactor > 0, then we are also testing performance
if (scaleFactor > 0):
print("Performance Test: ...Creating Content")
for unusedj in range(1, scaleFactor):
for moduleID in Graph.templateRepository.modules.keys():
if moduleID != "BrokenExamples":
#The module BrokenExamples contains memes that are deliberately malformed. Don't bother with these
module = Graph.templateRepository.modules[moduleID]
for listing in module:
template = Graph.templateRepository.resolveTemplateAbsolutely(listing[1])
if template.className == "Meme":
if template.isSingleton != True:
try:
unusedEntityID = Graph.api.createEntityFromMeme(template.path.fullTemplatePath)
except Exception as e:
pass
print("Performance Test: Finished Creating Content")
# /Scale Factor'
entityCount = Graph.countEntities()
startTime = time.time()
try:
resultSet = runTests(css)
except Exception as e:
print(("test run problem. Traceback = %s" %e))
raise e
endTime = time.time()
validationTime = endTime - startTime
testReport = {"resultSet" : resultSet, "validationTime" : validationTime, "persistence" : persistence.__name__, "profileName" : profileName, "entityCount" : entityCount}
#publishResults(resultSet, validationTime, css)
print(("...%s: Test run finished. Waiting 30 seconds for log thread to catch up before starting shutdown") %(persistence.__name__))
time.sleep(30.0)
print(("...%s: Engine Stop (%s)") %(persistence.__name__, profileName))
Graph.stopLogger()
print(("...%s: Engine Stopped (%s)") %(persistence.__name__, profileName))
return testReport
if __name__ == "__main__":
print("\nStarting Graphyne Smoke Test")
parser = argparse.ArgumentParser(description="Graphyne Smoke Test")
parser.add_argument("-l", "--logl", type=str, help="|String| Graphyne's log level during the validation run. \n Options are (in increasing order of verbosity) 'warning', 'info' and 'debug'. \n Default is 'warning'")
parser.add_argument("-r", "--resetdb", type=str, help="|String| Reset the esisting persistence DB This defaults to true and is only ever relevant when Graphyne is using relational database persistence.")
parser.add_argument("-d", "--dbtype", type=str, help="|String| The database type to be used. If --dbtype is a relational database, it will also determine which flavor of SQL syntax to use.\n Possible options are 'none', 'sqlite', 'mssql' and 'hana'. \n Default is 'none'")
parser.add_argument("-c", "--dbtcon", type=str, help="|String| The database connection string (if a relational DB) or filename (if SQLite).\n 'none' - no persistence. This is the default value\n 'memory' - Use SQLite in in-memory mode (connection = ':memory:') None persistence defaults to memory id SQlite is used\n '<valid filename>' - Use SQLite, with that file as the database\n <filename with .sqlite as extension, but no file> - Use SQLite and create that file to use as the DB file\n <anything else> - Presume that it is a pyodbc connection string")
args = parser.parse_args()
lLevel = Graph.logLevel.WARNING
if args.logl:
if args.logl == "info":
lLevel = Graph.logLevel.INFO
print("\n -- log level = 'info'")
elif args.logl == "debug":
lLevel = Graph.logLevel.DEBUG
print("\n -- log level = 'debug'")
elif args.logl == "warning":
pass
else:
print("Invalid log level %s! Permitted valies of --logl are 'warning', 'info' and 'debug'!" %args.logl)
sys.exit()
persistenceType = None
if args.dbtype:
if (args.dbtype is None) or (args.dbtype == 'none'):
pass
elif (args.dbtype == 'sqlite') or (args.dbtype == 'mssql') or (args.dbtype == 'hana'):
persistenceType = args.dbtype
print("\n -- using persistence type %s" %args.dbtype)
else:
print("Invalid persistence type %s! Permitted valies of --dbtype are 'none', 'sqlite', 'mssql' and 'hana'!" %args.logl)
sys.exit()
dbConnectionString = None
if args.dbtcon:
if (args.dbtcon is None) or (args.dbtcon == 'none'):
if persistenceType is None:
print("\n -- Using in-memory persistence (no connection required)")
elif persistenceType == 'sqlite':
dbConnectionString = 'memory'
print("\n -- Using sqlite persistence with connection = :memory:")
else:
print("\n -- Persistence type %s requires a valid database connection. Please provide a --dbtcon argument!" %persistenceType)
sys.exit()
elif args.dbtcon == 'memory':
if persistenceType is None:
#memory is a valid alternative to none with no persistence
print("\n -- Using in-memory persistence (no connection required)")
elif persistenceType == 'sqlite':
dbConnectionString = args.dbtcon
print("\n -- Using sqlite persistence with connection = :memory:")
else:
print("\n -- Persistence type %s requires a valid database connection. Please provide a --dbtcon argument!" %persistenceType)
sys.exit()
else:
dbConnectionString = args.dbtcon
if persistenceType == 'sqlite':
if dbConnectionString.endswith(".sqlite"):
print("\n -- Using sqlite persistence with file %s" %dbConnectionString)
else:
print("\n -- Using sqlite persistence with invalid filename %s. It must end with the .sqlite extension" %dbConnectionString)
sys.exit()
else:
print("\n -- Using persistence type %s with connection = %s" %(args.dbtype, args.dbtcon))
resetDatabase = True
if args.resetdb:
        if args.resetdb.lower() == "false":
resetDatabase = False
print((" ...params: log level = %s, db driver = %s, connection string = %s" %(lLevel, persistenceType, dbConnectionString)))
testReport = None
css = Fileutils.defaultCSS()
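    # Driver selection for the test run (summarizing the branches below):
    #   - no --dbtype                  -> NonPersistent driver (everything held in RAM)
    #   - sqlite + 'memory' connection -> RelationalDatabase driver on an in-memory SQLite DB
    #   - sqlite + filename            -> RelationalDatabase driver on that .sqlite file
    #   - any other type (mssql/hana)  -> RelationalDatabase driver with the supplied connection string
    # If wiring up the requested driver raises, the except block falls back to the
    # relational driver with the same arguments.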
try:
if persistenceType is None:
from graphyne.DatabaseDrivers import NonPersistent as persistenceModule1
testReport = smokeTestSet(persistenceModule1, lLevel, css, "No-Persistence", dbConnectionString, persistenceType, resetDatabase, True)
elif ((persistenceType == "sqlite") and (dbConnectionString== "memory")):
from graphyne.DatabaseDrivers import RelationalDatabase as persistenceModule2
testReport = smokeTestSet(persistenceModule2, lLevel, css, "sqllite", dbConnectionString, persistenceType, resetDatabase, True)
elif persistenceType == "sqlite":
from graphyne.DatabaseDrivers import RelationalDatabase as persistenceModule4
testReport = smokeTestSet(persistenceModule4, lLevel, css, "sqllite", dbConnectionString, persistenceType, resetDatabase)
else:
from graphyne.DatabaseDrivers import RelationalDatabase as persistenceModul3
testReport = smokeTestSet(persistenceModul3, lLevel, css, persistenceType, dbConnectionString, persistenceType, resetDatabase)
except Exception as e:
from graphyne.DatabaseDrivers import RelationalDatabase as persistenceModul32
testReport = smokeTestSet(persistenceModul32, lLevel, css, persistenceType, dbConnectionString, persistenceType, resetDatabase)
titleText = "Graphyne Smoke Test Suite - Results"
publishResults([testReport], css, "GraphyneTestResult.html", titleText) | apache-2.0 |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.8/gnmt-tpuv2-8/code/gnmt/model/t2t/tensor2tensor/layers/transformer_layers.py | 3 | 13829 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commonly re-used transformer layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import expert_utils
from tensor2tensor.utils import mlperf_log
import tensorflow as tf
def transformer_prepare_encoder(inputs, target_space, hparams, features=None):
"""Prepare one shard of the model for the encoder.
Args:
inputs: a Tensor.
target_space: a Tensor.
hparams: run hyperparameters
features: optionally pass the entire features dictionary as well.
This is needed now for "packed" datasets.
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
attention
"""
ishape_static = inputs.shape.as_list()
encoder_input = inputs
if features and "inputs_segmentation" in features:
# Packed dataset. Keep the examples from seeing each other.
inputs_segmentation = features["inputs_segmentation"]
inputs_position = features["inputs_position"]
targets_segmentation = features["targets_segmentation"]
encoder_self_attention_bias = common_attention.attention_bias_same_segment(
inputs_segmentation, inputs_segmentation)
encoder_decoder_attention_bias = (
common_attention.attention_bias_same_segment(targets_segmentation,
inputs_segmentation))
else:
# Usual case - not a packed dataset.
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
encoder_self_attention_bias = ignore_padding
encoder_decoder_attention_bias = ignore_padding
inputs_position = None
if hparams.proximity_bias:
encoder_self_attention_bias += common_attention.attention_bias_proximal(
common_layers.shape_list(inputs)[1])
if hparams.get("use_target_space_embedding", True):
# Append target_space_id embedding to inputs.
emb_target_space = common_layers.embedding(
target_space,
32,
ishape_static[-1],
name="target_space_embedding",
dtype=tf.bfloat16
if hparams.activation_dtype == "bfloat16" else tf.float32)
emb_target_space = tf.reshape(emb_target_space, [1, 1, -1])
encoder_input += emb_target_space
if hparams.pos == "timing":
if inputs_position is not None:
encoder_input = common_attention.add_timing_signal_1d_given_position(
encoder_input, inputs_position)
else:
encoder_input = common_attention.add_timing_signal_1d(encoder_input)
elif hparams.pos == "emb":
encoder_input = common_attention.add_positional_embedding(
encoder_input, hparams.max_length, "inputs_positional_embedding",
inputs_position)
if hparams.activation_dtype == "bfloat16":
encoder_self_attention_bias = tf.cast(encoder_self_attention_bias,
tf.bfloat16)
encoder_decoder_attention_bias = tf.cast(encoder_decoder_attention_bias,
tf.bfloat16)
return (encoder_input, encoder_self_attention_bias,
encoder_decoder_attention_bias)
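# Illustrative shapes for transformer_prepare_encoder (a sketch, not taken from the
# library's documentation): given `inputs` of shape [batch, length, hidden] and a
# scalar `target_space` id, the returned `encoder_input` keeps the
# [batch, length, hidden] shape, while the two bias tensors broadcast against the
# attention logits (typically [batch, 1, 1, length]) so that padded positions get a
# large negative bias and are effectively ignored by the softmax.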
def transformer_encoder(encoder_input,
encoder_self_attention_bias,
hparams,
name="encoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True,
losses=None):
"""A stack of transformer layers.
Args:
encoder_input: a Tensor
encoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This must either be
passed in, which we do for "packed" datasets, or inferred from
encoder_self_attention_bias. The knowledge about padding is used
for pad_remover(efficiency) and to mask out padding in convolutional
layers.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
losses: optional list onto which to append extra training losses
Returns:
    y: a Tensor
"""
x = encoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
value=hparams.num_encoder_layers or hparams.num_hidden_layers)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,
value=hparams.attention_dropout)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DENSE,
value={
"use_bias": "false",
"num_heads": hparams.num_heads,
"hidden_size": hparams.hidden_size
})
with tf.variable_scope(name):
if nonpadding is not None:
padding = 1.0 - nonpadding
else:
padding = common_attention.attention_bias_to_padding(
encoder_self_attention_bias)
nonpadding = 1.0 - padding
pad_remover = None
if hparams.use_pad_remover and not common_layers.is_xla_compiled():
pad_remover = expert_utils.PadRemover(padding)
for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
vars_3d=hparams.get("attention_variables_3d"))
x = common_layers.layer_postprocess(x, y, hparams)
with tf.variable_scope("ffn"):
y = transformer_ffn_layer(
common_layers.layer_preprocess(x, hparams),
hparams,
pad_remover,
conv_padding="SAME",
nonpadding_mask=nonpadding,
losses=losses)
x = common_layers.layer_postprocess(x, y, hparams)
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NORM,
value={"hidden_size": hparams.hidden_size})
return common_layers.layer_preprocess(x, hparams)
def transformer_ffn_layer(x,
hparams,
pad_remover=None,
conv_padding="LEFT",
nonpadding_mask=None,
losses=None,
cache=None,
decode_loop_step=None,
readout_filter_size=0):
"""Feed-forward layer in the transformer.
Args:
x: a Tensor of shape [batch_size, length, hparams.hidden_size]
hparams: hyperparameters for model
pad_remover: an expert_utils.PadRemover object tracking the padding
positions. If provided, when using convolutional settings, the padding
is removed before applying the convolution, and restored afterward. This
can give a significant speedup.
conv_padding: a string - either "LEFT" or "SAME".
nonpadding_mask: an optional Tensor with shape [batch_size, length].
needed for convolutional layers with "SAME" padding.
Contains 1.0 in positions corresponding to nonpadding.
losses: optional list onto which to append extra training losses
cache: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop.
Only used for inference on TPU.
readout_filter_size: if it's greater than 0, then it will be used instead of
filter_size
Returns:
a Tensor of shape [batch_size, length, hparams.hidden_size]
Raises:
ValueError: If losses arg is None, but layer generates extra losses.
"""
ffn_layer = hparams.ffn_layer
relu_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "relu_dropout_broadcast_dims", "")))
if ffn_layer == "conv_hidden_relu":
# Backwards compatibility
ffn_layer = "dense_relu_dense"
if ffn_layer == "dense_relu_dense":
# In simple convolution mode, use `pad_remover` to speed up processing.
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_FFN_FILTER_DENSE,
value={
"filter_size": hparams.filter_size,
"use_bias": "True",
"activation": mlperf_log.RELU
})
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_FFN_OUTPUT_DENSE,
value={
"hidden_size": hparams.hidden_size,
"use_bias": "True",
})
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_RELU_DROPOUT, value=hparams.relu_dropout)
if pad_remover:
original_shape = common_layers.shape_list(x)
# Collapse `x` across examples, and remove padding positions.
x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0))
x = tf.expand_dims(pad_remover.remove(x), axis=0)
conv_output = common_layers.dense_relu_dense(
x,
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout,
dropout_broadcast_dims=relu_dropout_broadcast_dims)
if pad_remover:
# Restore `conv_output` to the original shape of `x`, including padding.
conv_output = tf.reshape(
pad_remover.restore(tf.squeeze(conv_output, axis=0)), original_shape)
return conv_output
elif ffn_layer == "conv_relu_conv":
return common_layers.conv_relu_conv(
x,
readout_filter_size or hparams.filter_size,
hparams.hidden_size,
first_kernel_size=hparams.conv_first_kernel,
second_kernel_size=1,
padding=conv_padding,
nonpadding_mask=nonpadding_mask,
dropout=hparams.relu_dropout,
cache=cache,
decode_loop_step=decode_loop_step)
elif ffn_layer == "parameter_attention":
return common_attention.parameter_attention(
x, hparams.parameter_attention_key_channels or hparams.hidden_size,
hparams.parameter_attention_value_channels or hparams.hidden_size,
hparams.hidden_size, readout_filter_size or hparams.filter_size,
hparams.num_heads,
hparams.attention_dropout)
elif ffn_layer == "conv_hidden_relu_with_sepconv":
return common_layers.conv_hidden_relu(
x,
readout_filter_size or hparams.filter_size,
hparams.hidden_size,
kernel_size=(3, 1),
second_kernel_size=(31, 1),
padding="LEFT",
dropout=hparams.relu_dropout)
elif ffn_layer == "sru":
return common_layers.sru(x)
elif ffn_layer == "local_moe_tpu":
overhead = (
hparams.moe_overhead_train
if hparams.mode == tf.estimator.ModeKeys.TRAIN else
hparams.moe_overhead_eval)
    ret, loss = expert_utils.local_moe_tpu(
        x,
        hparams.filter_size // 2,
        hparams.hidden_size,
        hparams.moe_num_experts,
        overhead=overhead,
        loss_coef=hparams.moe_loss_coef)
    # Record the auxiliary MoE loss and return, mirroring the "local_moe" branch
    # below; without this the function would fall through and implicitly return
    # None for the "local_moe_tpu" setting.
    losses.append(loss)
    return ret
elif ffn_layer == "local_moe":
overhead = (
hparams.moe_overhead_train
if hparams.mode == tf.estimator.ModeKeys.TRAIN else
hparams.moe_overhead_eval)
ret, loss = expert_utils.local_moe(
x,
True,
expert_utils.ffn_expert_fn(hparams.hidden_size, [hparams.filter_size],
hparams.hidden_size),
hparams.moe_num_experts,
k=hparams.moe_k,
hparams=hparams)
losses.append(loss)
return ret
else:
assert ffn_layer == "none"
return x
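# Minimal usage sketch (assumptions: `hparams` comes from a standard transformer
# hyperparameter set such as common_hparams.basic_params1(), and `inputs` is a
# [batch, length, hidden] tensor; the variable names are illustrative only):
#
#   enc_in, self_bias, encdec_bias = transformer_prepare_encoder(
#       inputs, target_space=0, hparams=hparams)
#   enc_out = transformer_encoder(enc_in, self_bias, hparams)
#
# transformer_ffn_layer() is normally not called directly; transformer_encoder()
# invokes it once per layer inside the "ffn" variable scope above.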
| apache-2.0 |
tomsilver/nupic | tests/swarming/nupic/swarming/experiments/input_predicted_field/description.py | 1 | 14270 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupicengine/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer
)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'consumption', 'sum'),
],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# 'encoders': {'field1': {'fieldname': 'field1', 'n':100,
# 'name': 'field1', 'type': 'AdaptiveScalarEncoder',
# 'w': 21}}
#
'encoders': {
'consumption': {
'clipInput': True,
'fieldname': u'consumption',
'n': 100,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
'address': {
'fieldname': u'address',
'n': 300,
'name': u'address',
'type': 'SDRCategoryEncoder',
'w': 21},
'gym': {
'fieldname': u'gym',
'n': 100,
'name': u'gym',
'type': 'SDRCategoryEncoder',
'w': 21},
'timestamp_dayOfWeek': {
'dayOfWeek': (7, 3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': {
'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 8),
'type': 'DateEncoder'},
'_classifierInput': {
'name': u'_classifierInput',
'fieldname': u'consumption',
'classifierOnly': True,
'type': 'AdaptiveScalarEncoder',
'clipInput': True,
'n': 100,
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
        # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : { u'days': 0, u'hours': 0},
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': None},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
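# Worked example (hypothetical values, not part of this experiment): with
# predictAheadTime = dict(hours=1) and an aggregation period of minutes=15,
# aggregationDivide() would return 4.0, so the classifier would be configured
# to predict 4 aggregated steps ahead.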
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
#
'dataset' : {
u'info': u'test_hotgym',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://swarming/test_data.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'consumption', u'predictionSteps': [1]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'window': 1000, 'steps': [1], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| gpl-3.0 |
LohithBlaze/scikit-learn | benchmarks/bench_plot_svd.py | 322 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
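# `results` maps a human-readable label (e.g. 'scipy svd') to a list of wall-clock
# timings, one entry per (n_samples, n_features) grid point in the order the grid is
# traversed above; the plotting code below reshapes each list back onto that grid.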
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
jzt5132/scikit-learn | sklearn/linear_model/bayes.py | 219 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
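            # Standard evidence-maximization (MacKay-style) fixed-point updates:
            # gamma_ is the effective number of well-determined weights,
            # lambda_ is refreshed from the weight norm and alpha_ from the
            # residual sum of squares, each tempered by its Gamma hyper-prior
            # (lambda_1/lambda_2 and alpha_1/alpha_2).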
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedures (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
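            # Woodbury identity: (A + U C V)^-1 = A^-1 - A^-1 U (C^-1 + V A^-1 U)^-1 V A^-1.
            # It lets sigma_ = (diag(lambda_) + alpha_ * X^T X)^-1 (restricted to the
            # kept features) be obtained by inverting only an (n_samples x n_samples)
            # matrix, which is cheaper whenever the number of surviving features
            # exceeds n_samples.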
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
MohammedWasim/scikit-learn | examples/mixture/plot_gmm_sin.py | 247 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
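        # v holds the eigenvalues of the 2x2 covariance (the variances along the
        # principal axes, used directly as the ellipse width/height) and w the
        # corresponding eigenvectors, from which the rotation angle is derived below.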
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
jzt5132/scikit-learn | sklearn/utils/tests/test_testing.py | 106 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
jzt5132/scikit-learn | sklearn/neighbors/base.py | 71 | 31147 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning warnings are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
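        # Descriptive note: at this point ``self.effective_metric_`` holds the
        # concrete metric, e.g. a constructor call with metric='minkowski' and
        # p=2 was dispatched to the optimized 'euclidean' routines above, while
        # a non-standard power such as p=3 keeps the generic 'minkowski' metric
        # with the power stored in ``self.effective_metric_params_['p']``.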
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
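        # Descriptive note on the CSR layout: with n_neighbors=2 and three
        # query points, A_indptr is [0, 2, 4, 6], so row i of the returned
        # graph owns the entries A_ind[A_indptr[i]:A_indptr[i + 1]].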
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
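            # np.unique with return_inverse=True both collects the sorted class
            # labels for output k and re-encodes y[:, k] as integer indices
            # into that ``classes`` array, which is what gets stored in _y.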
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
Vict0rSch/deep_learning | keras/feedforward/feedforward_keras_mnist.py | 1 | 2623 | import time
import numpy as np
from matplotlib import pyplot as plt
from keras.utils import np_utils
import keras.callbacks as cb
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.datasets import mnist
class LossHistory(cb.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
batch_loss = logs.get('loss')
self.losses.append(batch_loss)
def load_data():
print 'Loading data...'
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
X_train = np.reshape(X_train, (60000, 784))
X_test = np.reshape(X_test, (10000, 784))
print 'Data loaded.'
return [X_train, X_test, y_train, y_test]
def init_model():
start_time = time.time()
print 'Compiling Model ... '
model = Sequential()
model.add(Dense(500, input_dim=784))
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Dense(300))
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    print 'Model compiled in {0} seconds'.format(time.time() - start_time)
return model
def run_network(data=None, model=None, epochs=20, batch=256):
try:
start_time = time.time()
if data is None:
X_train, X_test, y_train, y_test = load_data()
else:
X_train, X_test, y_train, y_test = data
if model is None:
model = init_model()
history = LossHistory()
print 'Training model...'
model.fit(X_train, y_train, nb_epoch=epochs, batch_size=batch,
callbacks=[history],
validation_data=(X_test, y_test), verbose=2)
print "Training duration : {0}".format(time.time() - start_time)
score = model.evaluate(X_test, y_test, batch_size=16)
print "Network's test score [loss, accuracy]: {0}".format(score)
return model, history.losses
except KeyboardInterrupt:
print ' KeyboardInterrupt'
return model, history.losses
def plot_losses(losses):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(losses)
ax.set_title('Loss per batch')
fig.show()
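# Illustrative entry point (a sketch, not part of the original script): when
# the module is executed directly, train with the defaults defined above and
# plot the per-batch training loss.
if __name__ == '__main__':
    trained_model, batch_losses = run_network()
    plot_losses(batch_losses)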
| gpl-2.0 |
luo66/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 241 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data are Gaussian distributed, yielding a somewhat biased
estimate of the data structure that is nevertheless accurate to some extent.
The One-Class SVM algorithm is also applied below, for comparison with the two
covariance-based estimators.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrate on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
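# Illustrative follow-up (a sketch, not part of the original example): the same
# estimators can also flag individual observations.  Refitting on the first
# data set and counting negative decision-function values gives the number of
# points each detector treats as outliers at the chosen contamination level.
for clf_name, clf in classifiers.items():
    clf.fit(X1)
    n_flagged = int(np.sum(clf.decision_function(X1).ravel() < 0))
    print("%s flags %d of %d observations" % (clf_name, n_flagged, X1.shape[0]))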
| bsd-3-clause |
sgenoud/scikit-learn | sklearn/setup.py | 1 | 2735 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for hmm
config.add_extension(
'_hmmc',
sources=['_hmmc.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
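# Typical invocation (illustrative note): the compiled extensions can be built
# in place with ``python setup.py build_ext --inplace`` from the project root.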
| bsd-3-clause |
LohithBlaze/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 248 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
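# Illustrative check (a sketch, not part of the original example): count the
# non-zero coefficients kept by each estimator.  MultiTaskLasso selects or
# discards a feature jointly for all tasks, so its sparsity pattern is the
# same at every time point, while the independent Lasso fits pick different
# features at different time points.
print("Lasso: %d non-zero coefficients" % np.count_nonzero(coef_lasso_))
print("MultiTaskLasso: %d non-zero coefficients"
      % np.count_nonzero(coef_multi_task_lasso_))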
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
glouppe/scikit-learn | sklearn/utils/tests/test_validation.py | 54 | 18600 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
has_fit_parameter,
check_is_fitted,
check_consistent_length,
)
from sklearn.exceptions import NotFittedError
from sklearn.exceptions import DataConversionWarning
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
@ignore_warnings
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
assert_warns(DeprecationWarning, check_array, [0, 1, 2])
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = assert_warns(DeprecationWarning, check_array, [42],
ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
RensaProject/nodebox_linguistics_extended | nodebox_linguistics_extended/parser/nltk_lite/__init__.py | 10 | 2228 | # Natural Language Toolkit (NLTK-Lite)
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Authors: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
NLTK-Lite is a collection of lightweight NLP modules designed for
maximum simplicity and efficiency. NLTK-Lite only covers the simple
variants of standard data structures and tasks. It makes extensive
use of iterators so that large tasks generate output as early as
possible.
Key differences from NLTK are as follows:
- tokens are represented as strings, tuples, or trees
- all tokenizers are iterators
- less object orientation
NLTK-Lite is primarily intended to facilitate teaching NLP to students
having limited programming experience. The focus is on teaching
Python together with the help of NLP recipes, instead of teaching
students to use a large set of specialized classes.
@version: 0.7a2
"""
##//////////////////////////////////////////////////////
## Metadata
##//////////////////////////////////////////////////////
# Version. For each new release, the version number should be updated
# here and in the Epydoc comment (above).
__version__ = "0.7a2"
# Copyright notice
__copyright__ = """\
Copyright (C) 2001-2006 University of Pennsylvania.
Distributed and Licensed under provisions of the GNU Public
License, which is included by reference.
"""
__license__ = "GNU Public License"
# Description of the toolkit, keywords, and the project's primary URL.
__longdescr__ = """\
The Natural Language Toolkit (NLTK-Lite) is a Python package for
processing natural language text. It was developed as a simpler,
lightweight version of NLTK. NLTK-Lite requires Python 2.4 or higher."""
__keywords__ = ['NLP', 'CL', 'natural language processing',
'computational linguistics', 'parsing', 'tagging',
'tokenizing', 'syntax', 'linguistics', 'language',
'natural language']
__url__ = "http://nltk.sf.net/"
# Maintainer, contributors, etc.
__maintainer__ = "Steven Bird"
__maintainer_email__ = "sb@csse.unimelb.edu.au"
__author__ = __maintainer__
__author_email__ = __maintainer_email__
| gpl-2.0 |
lukeiwanski/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops_test.py | 40 | 20535 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for triplet_semihard_loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.losses.python import metric_learning as metric_loss_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance_np(feature, squared=False):
"""Computes the pairwise distance matrix in numpy.
Args:
feature: 2-D numpy array of size [number of data, feature dimension]
squared: Boolean. If true, output is the pairwise squared euclidean
distance matrix; else, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: 2-D numpy array of size
[number of data, number of data].
"""
triu = np.triu_indices(feature.shape[0], 1)
upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
if squared:
upper_tri_pdists **= 2.
num_data = feature.shape[0]
pairwise_distances = np.zeros((num_data, num_data))
pairwise_distances[np.triu_indices(num_data, 1)] = upper_tri_pdists
# Make symmetrical.
pairwise_distances = pairwise_distances + pairwise_distances.T - np.diag(
pairwise_distances.diagonal())
return pairwise_distances
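# An illustrative cross-check (not part of the original tests; the helper name
# below is ours and is not referenced elsewhere): a naive O(n^2) double loop
# that computes the same matrix as pairwise_distance_np.
def _pairwise_distance_naive(feature, squared=False):
  """Reference double-loop version of pairwise_distance_np."""
  num_data = feature.shape[0]
  distances = np.zeros((num_data, num_data))
  for i in range(num_data):
    for j in range(num_data):
      diff = feature[i] - feature[j]
      sq_dist = np.dot(diff, diff)
      distances[i, j] = sq_dist if squared else np.sqrt(sq_dist)
  return distances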
class ContrastiveLossTest(test.TestCase):
def testContrastive(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, size=(num_data,)).astype(np.float32)
# Compute the loss in NP
dist = np.sqrt(
np.sum(np.square(embeddings_anchor - embeddings_positive), axis=1))
loss_np = np.mean(
labels * np.square(dist) +
(1.0 - labels) * np.square(np.maximum(margin - dist, 0.0)))
# Compute the loss with TF
loss_tf = metric_loss_ops.contrastive_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class TripletSemiHardLossTest(test.TestCase):
def testTripletSemiHard(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP.
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding, squared=True)
loss_np = 0.0
num_positives = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
num_positives += 1.0
pos_distance = pdist_matrix[i][j]
neg_distances = []
for k in range(num_data):
if adjacency[i][k] == 0:
neg_distances.append(pdist_matrix[i][k])
# Sort by distance.
neg_distances.sort()
chosen_neg_distance = neg_distances[0]
for l in range(len(neg_distances)):
chosen_neg_distance = neg_distances[l]
if chosen_neg_distance > pos_distance:
break
loss_np += np.maximum(
0.0, margin - chosen_neg_distance + pos_distance)
loss_np /= num_positives
# Compute the loss in TF.
loss_tf = metric_loss_ops.triplet_semihard_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class LiftedStructLossTest(test.TestCase):
def testLiftedStruct(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding)
loss_np = 0.0
num_constraints = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
d_pos = pdist_matrix[i][j]
negs = []
for k in range(num_data):
if not adjacency[i][k]:
negs.append(margin - pdist_matrix[i][k])
for l in range(num_data):
if not adjacency[j][l]:
negs.append(margin - pdist_matrix[j][l])
negs = np.array(negs)
max_elem = np.max(negs)
negs -= max_elem
negs = np.exp(negs)
soft_maximum = np.log(np.sum(negs)) + max_elem
num_constraints += 1.0
this_loss = max(soft_maximum + d_pos, 0)
loss_np += this_loss * this_loss
loss_np = loss_np / num_constraints / 2.0
# Compute the loss in TF
loss_tf = metric_loss_ops.lifted_struct_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def convert_to_list_of_sparse_tensor(np_matrix):
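  """Converts each row of a dense 0/1 matrix into a 1-D SparseTensor.
  For every row, the indices are the columns equal to one, the values are all
  ones, and the dense shape is [ncols]; a Python list with one SparseTensor
  per row is returned.
  """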
list_of_sparse_tensors = []
nrows, ncols = np_matrix.shape
for i in range(nrows):
sp_indices = []
for j in range(ncols):
if np_matrix[i][j] == 1:
sp_indices.append([j])
num_non_zeros = len(sp_indices)
list_of_sparse_tensors.append(sparse_tensor.SparseTensor(
indices=np.array(sp_indices),
values=np.ones((num_non_zeros,)),
dense_shape=np.array([ncols,])))
return list_of_sparse_tensors
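# The NumPy reference computations in the tests below mirror the N-pairs loss:
# a softmax cross-entropy over the anchor/positive similarity matrix plus an
# L2 regularization term of
#   0.25 * reg_lambda * (mean ||anchor||^2 + mean ||positive||^2).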
class NpairsLossTest(test.TestCase):
def testNpairs(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 5
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.equal(
labels_reshaped, labels_reshaped.T).astype(np.float32)
labels_remapped /= np.sum(labels_remapped, axis=1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class NpairsLossMultiLabelTest(test.TestCase):
def testNpairsMultiLabelLossWithSingleLabelEqualsNpairsLoss(self):
with self.test_session():
num_data = 15
feat_dim = 6
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.arange(num_data)
labels = np.reshape(labels, -1)
      # Compute vanilla npairs loss.
loss_npairs = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
# Compute npairs multilabel loss.
labels_one_hot = np.identity(num_data)
loss_npairs_multilabel = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels_one_hot),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
self.assertAllClose(loss_npairs, loss_npairs_multilabel)
def testNpairsMultiLabel(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 10
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, (num_data, num_classes))
      # Set the last column to one so that each row has at least one bit set.
labels[:, -1] = 1
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.dot(labels, labels.T).astype(np.float)
labels_remapped /= np.sum(labels_remapped, 1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def compute_ground_truth_cluster_score(feat, y):
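  """Computes the ground-truth facility-location score, one medoid per class.
  For each ground-truth class, the member minimizing the summed distance to
  the rest of the class is taken as medoid and its negated cost is accumulated.
  """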
y_unique = np.unique(y)
score_gt_np = 0.0
for c in y_unique:
feat_subset = feat[y == c, :]
pdist_subset = pairwise_distance_np(feat_subset)
score_gt_np += -1.0 * np.min(np.sum(pdist_subset, axis=0))
score_gt_np = score_gt_np.astype(np.float32)
return score_gt_np
def compute_cluster_loss_numpy(feat,
y,
margin_multiplier=1.0,
enable_pam_finetuning=True):
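  """NumPy reference of the structured cluster loss.
  Fits a loss-augmented facility-location clustering (optionally refined with
  PAM) and returns max(0, augmented score - ground-truth score).
  """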
if enable_pam_finetuning:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).pam_augmented_fit(feat, y,
margin_multiplier)
else:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).loss_augmented_fit(feat, y,
margin_multiplier)
score_augmented = facility.score_aug_
score_gt = compute_ground_truth_cluster_score(feat, y)
return np.maximum(np.float32(0.0), score_augmented - score_gt)
class ForwardGreedyFacility(object):
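  """Greedy forward selection of medoids for the facility-location objective.
  `loss_augmented_fit` adds, at each step, the candidate medoid that maximizes
  the loss-augmented score; `pam_augmented_fit` additionally refines the
  chosen medoids with a few PAM-style swap iterations.
  """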
def __init__(self, n_clusters=8):
self.n_clusters = n_clusters
self.center_ics_ = None
def _check_init_args(self):
# Check n_clusters.
if (self.n_clusters is None or self.n_clusters <= 0 or
not isinstance(self.n_clusters, int)):
      raise ValueError('n_clusters has to be a positive integer.')
def loss_augmented_fit(self, feat, y, loss_mult):
"""Fit K-Medoids to the provided data."""
self._check_init_args()
# Check that the array is good and attempt to convert it to
# Numpy array if possible.
feat = self._check_array(feat)
# Apply distance metric to get the distance matrix.
pdists = pairwise_distance_np(feat)
num_data = feat.shape[0]
candidate_ids = list(range(num_data))
candidate_scores = np.zeros(num_data,)
subset = []
k = 0
while k < self.n_clusters:
candidate_scores = []
for i in candidate_ids:
# push i to subset.
subset.append(i)
marginal_cost = -1.0 * np.sum(np.min(pdists[:, subset], axis=1))
loss = 1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset))
candidate_scores.append(marginal_cost + loss_mult * loss)
# remove i from subset.
subset.pop()
# push i_star to subset.
i_star = candidate_ids[np.argmax(candidate_scores)]
subset.append(i_star)
# remove i_star from candidate indices.
candidate_ids.remove(i_star)
k += 1
# Expose labels_ which are the assignments of
# the training data to clusters.
self.labels_ = self._get_cluster_ics(pdists, subset)
# Expose cluster centers, i.e. medoids.
self.cluster_centers_ = feat.take(subset, axis=0)
# Expose indices of chosen cluster centers.
self.center_ics_ = subset
# Expose the score = -\sum_{i \in V} min_{j \in S} || x_i - x_j ||
self.score_ = np.float32(-1.0) * self._get_facility_distance(pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
# Expose the chosen cluster indices.
self.subset_ = subset
return self
def _augmented_update_medoid_ics_in_place(self, pdists, y_gt, cluster_ics,
medoid_ics, loss_mult):
for cluster_idx in range(self.n_clusters):
# y_pred = self._get_cluster_ics(D, medoid_ics)
# Don't prematurely do the assignment step.
# Do this after we've updated all cluster medoids.
y_pred = cluster_ics
if sum(y_pred == cluster_idx) == 0:
# Cluster is empty.
continue
curr_score = (
-1.0 * np.sum(
pdists[medoid_ics[cluster_idx], y_pred == cluster_idx]) +
loss_mult * (1.0 - metrics.normalized_mutual_info_score(
y_gt, y_pred)))
pdist_in = pdists[y_pred == cluster_idx, :]
pdist_in = pdist_in[:, y_pred == cluster_idx]
all_scores_fac = np.sum(-1.0 * pdist_in, axis=1)
all_scores_loss = []
for i in range(y_pred.size):
if y_pred[i] != cluster_idx:
continue
        # remove this cluster's current medoid
medoid_ics_i = medoid_ics[:cluster_idx] + medoid_ics[cluster_idx + 1:]
        # add this new candidate to the medoid list
medoid_ics_i += [i]
y_pred_i = self._get_cluster_ics(pdists, medoid_ics_i)
all_scores_loss.append(loss_mult * (
1.0 - metrics.normalized_mutual_info_score(y_gt, y_pred_i)))
all_scores = all_scores_fac + all_scores_loss
max_score_idx = np.argmax(all_scores)
max_score = all_scores[max_score_idx]
if max_score > curr_score:
medoid_ics[cluster_idx] = np.where(
y_pred == cluster_idx)[0][max_score_idx]
def pam_augmented_fit(self, feat, y, loss_mult):
pam_max_iter = 5
self._check_init_args()
feat = self._check_array(feat)
pdists = pairwise_distance_np(feat)
self.loss_augmented_fit(feat, y, loss_mult)
print('PAM -1 (before PAM): score: %f, score_aug: %f' % (
self.score_, self.score_aug_))
# Initialize from loss augmented facility location
subset = self.center_ics_
for iter_ in range(pam_max_iter):
# update the cluster assignment
cluster_ics = self._get_cluster_ics(pdists, subset)
      # update the medoid for each cluster
self._augmented_update_medoid_ics_in_place(pdists, y, cluster_ics, subset,
loss_mult)
self.score_ = np.float32(-1.0) * self._get_facility_distance(
pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
print('PAM iter: %d, score: %f, score_aug: %f' % (iter_, self.score_,
self.score_aug_))
self.center_ics_ = subset
self.labels_ = cluster_ics
return self
def _check_array(self, feat):
# Check that the number of clusters is less than or equal to
# the number of samples
if self.n_clusters > feat.shape[0]:
      raise ValueError('The number of medoids ' + '({}) '.format(
          self.n_clusters) + 'must not be larger than the number ' +
          'of samples ({})'.format(feat.shape[0]))
return feat
def _get_cluster_ics(self, pdists, subset):
"""Returns cluster indices for pdist and current medoid indices."""
    # Assign data points to clusters based on
    # which cluster assignment yields
    # the smallest distance.
cluster_ics = np.argmin(pdists[subset, :], axis=0)
return cluster_ics
def _get_facility_distance(self, pdists, subset):
return np.sum(np.min(pdists[subset, :], axis=0))
class ClusterLossTest(test.TestCase):
def _genClusters(self, n_samples, n_clusters):
blobs = datasets.make_blobs(
n_samples=n_samples, centers=n_clusters)
embedding, labels = blobs
embedding = (embedding - embedding.mean(axis=0)) / embedding.std(axis=0)
embedding = embedding.astype(np.float32)
return embedding, labels
def testClusteringLossPAMOff(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=False)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=False)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def testClusteringLossPAMOn(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=True)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=True)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
if __name__ == '__main__':
test.main()
| apache-2.0 |
MohammedWasim/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 102 | 22297 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
    # Test whether f_regression preserves degrees of freedom according to the
    # 'center' argument.
    # We use two centered variates so that there is a simple relationship
    # between the F-score with centering and the F-score without centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
    Y[0] = 0.  # make the mean of Y zero
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
jfconavarrete/kaggle-facebook | src/data/pre_process_k_means.py | 1 | 1226 | import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
import cv2
from sklearn import metrics
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn import cross_validation as cv
from sklearn import svm
from sklearn import ensemble
from sklearn import linear_model
def main():
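    """Loads the raw training data, min-max normalizes the numeric feature
    columns, inverts the accuracy column, and clusters the points with
    OpenCV's k-means using one cluster per unique place_id."""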
train = pd.read_csv('../../data/raw/train.csv')
print train.shape
uniq = train['place_id'].nunique()
print uniq
col_headers = list(train.columns.values)
print col_headers
train[col_headers[1:-1]] = train[col_headers[1:-1]].apply(lambda x: (x - x.min()) / (x.max() - x.min()))
train['accuracy'] = 1 - train['accuracy']
train_X_norm = train.values[:,:-1]
print train_X_norm.shape
K = uniq
clusters = range(0,K)
batch_size = 500
n_init = 10
train_X_norm = train_X_norm.astype(np.float32)
print train_X_norm.dtype
print train_X_norm.shape
# define criteria and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret, label, center = cv2.kmeans(train_X_norm, K, criteria, n_init, cv2.KMEANS_RANDOM_CENTERS)
print center.shape
if __name__ == '__main__':
main() | mit |
tomsilver/nupic | examples/opf/experiments/spatial_classification/category_1/description.py | 17 | 1557 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/category_1.csv'),
'errorMetric': 'avg_err',
'modelParams': {
'sensorParams': { 'verbosity': 0},
'clParams': {
'clVerbosity': 0,
},
}
}
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| gpl-3.0 |
MohammedWasim/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 247 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed-form formula to compute
the asymptotically optimal shrinkage parameter (minimizing an MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
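# Both estimators below compute a convex combination of the empirical
# covariance and a scaled identity target,
#     Sigma_shrunk = (1 - shrinkage) * Sigma_empirical + shrinkage * mu * Id,
# with mu = trace(Sigma_empirical) / n_features; they differ only in how the
# shrinkage coefficient is estimated from the data.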
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
LohithBlaze/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 250 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
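# A minimal illustrative sketch (the helper below is not used by the example
# and its name is illustrative only): for a single 1d feature,
# PolynomialFeatures(degree) produces the columns [1, x, x**2, ..., x**degree],
# i.e. the increasing-power Vandermonde matrix described in the docstring.
def _vandermonde_features(x, degree):
    """Return the n_samples x (degree + 1) matrix [1, x, ..., x**degree]."""
    return np.vander(x, degree + 1, increasing=True)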
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
jzt5132/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 292 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
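    """O(N^3) Floyd-Warshall reference implementation used as ground truth.
    Zero entries of `graph` are treated as missing edges: they are set to
    infinity during the computation and restored to zero in the returned
    matrix.
    """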
N = graph.shape[0]
    #set zero entries (missing edges) to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
jzt5132/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 274 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
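# consensus_score compares the recovered biclusters with the planted ones
# under the best one-to-one matching (Jaccard similarity by default); a score
# of 1.0 means the biclusters were recovered exactly.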
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
automl/auto-sklearn | test/test_metalearning/pyMetaLearn/test_metalearning_configuration.py | 1 | 1879 | import logging
import os
import autosklearn.metalearning.optimizers.metalearn_optimizer.metalearner as metalearner # noqa: E501
import autosklearn.pipeline.classification
from autosklearn.metalearning.metalearning.meta_base import MetaBase
import unittest
logging.basicConfig()
class MetalearningConfiguration(unittest.TestCase):
def test_metalearning_cs_size(self):
self.cwd = os.getcwd()
data_dir = os.path.dirname(__file__)
data_dir = os.path.join(data_dir, "test_meta_base_data")
os.chdir(data_dir)
        # Total: 179, categorical: 3, numerical: 6, string: 11
total = 179
num_numerical = 6
num_string = 11
num_categorical = 3
for feat_type, cs_size in [
({"A": "numerical"}, total - num_string - num_categorical),
({"A": "categorical"}, total - num_string - num_numerical),
({"A": "string"}, total - num_categorical - num_numerical),
({"A": "numerical", "B": "categorical"}, total - num_string),
({"A": "numerical", "B": "string"}, total - num_categorical),
({"A": "categorical", "B": "string"}, total - num_numerical),
({"A": "categorical", "B": "string", "C": "numerical"}, total),
]:
pipeline = autosklearn.pipeline.classification.SimpleClassificationPipeline(
feat_type=feat_type
)
self.cs = pipeline.get_hyperparameter_search_space(feat_type=feat_type)
self.logger = logging.getLogger()
meta_base = MetaBase(self.cs, data_dir, logger=self.logger)
self.meta_optimizer = metalearner.MetaLearningOptimizer(
"233", self.cs, meta_base, logger=self.logger
)
self.assertEqual(
len(self.meta_optimizer.configuration_space), cs_size, feat_type
)
| bsd-3-clause |
sgenoud/scikit-learn | sklearn/cluster/tests/test_k_means.py | 3 | 18449 | """Testing for K-means"""
import numpy as np
import warnings
from scipy import sparse as sp
from numpy.testing import assert_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from nose import SkipTest
from nose.tools import assert_almost_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.fixes import unique
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.cluster._k_means import csr_row_norm_l2
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the behaviour of k-means on
# non-trivial data
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_square_norms():
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_from_csr = csr_row_norm_l2(X_csr)
assert_array_almost_equal(x_squared_norms,
x_squared_norms_from_csr, 5)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
with warnings.catch_warnings(record=True) as w:
assert_array_equal(km.labels_, km.predict(X))
assert_equal(len(w), 1)
def test_labels_assignement_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
    # perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
    # perform label assignment using the sparse CSR input
x_squared_norms_from_csr = csr_row_norm_l2(X_csr)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
"""Check that dense and sparse minibatch update give the same results"""
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = csr_row_norm_l2(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the label assignments are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
k_means = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(k_means)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = unique(this_labels,
return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _get_mac_os_version():
import platform
mac_version, _, _ = platform.mac_ver()
if mac_version:
# turn something like '10.7.3' into '10.7'
return '.'.join(mac_version.split('.')[:2])
def test_k_means_plus_plus_init_2_jobs():
if _get_mac_os_version() == '10.7':
raise SkipTest('Multi-process bug in Mac OS X Lion (see issue #636)')
k_means = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(k_means)
def test_k_means_plus_plus_init_sparse():
k_means = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
k_means.fit(X_csr)
_check_fitted_model(k_means)
def test_k_means_random_init():
k_means = KMeans(init="random", n_clusters=n_clusters, random_state=42)
k_means.fit(X)
_check_fitted_model(k_means)
def test_k_means_random_init_sparse():
k_means = KMeans(init="random", n_clusters=n_clusters, random_state=42)
k_means.fit(X_csr)
_check_fitted_model(k_means)
def test_k_means_plus_plus_init_not_precomputed():
k_means = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(k_means)
def test_k_means_random_init_not_precomputed():
k_means = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(k_means)
def test_k_means_perfect_init():
k_means = KMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1)
k_means.fit(X)
_check_fitted_model(k_means)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
with warnings.catch_warnings(record=True) as warn_queue:
mb_k_means.fit(X)
assert_equal(len(warn_queue), 1)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_sparse_mb_k_means_callable_init():
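    # init may also be a callable taking (X, k, random_state) and returning
    # the initial centers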
def test_init(X, k, random_state):
return centers
mb_k_means = MiniBatchKMeans(init=test_init, random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
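    # without an explicit init_size, MiniBatchKMeans should default to using
    # 3 * batch_size samples for its initialization heuristic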
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
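    # an init_size larger than the dataset is kept as passed in init_size but
    # clipped to n_samples in the fitted init_size_ attribute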
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
k_means = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, k_means.fit, X)
def test_mini_batch_k_means_invalid_init():
k_means = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, k_means.fit, X)
def test_k_means_copyx():
"""Check if copy_x=False returns nearly equal X after de-centering."""
my_X = X.copy()
k_means = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
k_means.fit(my_X)
_check_fitted_model(k_means)
    # check that my_X was de-centered back to (nearly) its original values
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
"""Check k_means with a bad initialization does not yield a singleton
Starting with bad centers that are quickly ignored should not
result in a repositioning of the centers to the center of mass that
would lead to collapsed centers which in turns make the clustering
dependent of the numerical unstabilities.
"""
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
k_means = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
k_means.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(k_means.labels_)), 3)
centers = k_means.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
k_means = KMeans(n_clusters=n_clusters, random_state=42)
k_means.fit(X)
# sanity check: predict centroid labels
pred = k_means.predict(k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = k_means.predict(X)
assert_array_equal(pred, k_means.labels_)
# re-predict labels for training set using fit_predict
pred = k_means.fit_predict(X)
assert_array_equal(pred, k_means.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
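# A minimal illustrative sketch, not part of the original suite (the
# underscore prefix keeps test collectors from picking it up): KMeans.score
# returns the negative inertia of the given data, so a model scored on its
# own training set should agree with -inertia_ up to floating point error.
def _score_matches_negative_inertia_sketch():
    km = KMeans(n_clusters=n_clusters, random_state=42).fit(X)
    assert_almost_equal(km.score(X), -km.inertia_)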
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
    # predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
    # predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
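    # list, integer ndarray and sparse CSR inputs should all yield the same
    # (perfect) clustering of this tiny dataset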
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
        # MiniBatchKMeans is very unstable on such a small dataset, hence
        # we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
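    # transform maps samples into cluster-distance space: entry [i, j] is the
    # distance from sample i to cluster center j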
k_means = KMeans(n_clusters=n_clusters)
k_means.fit(X)
X_new = k_means.transform(k_means.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
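# Illustrative sketch (assumes the module-level X and n_clusters used above):
# transforming the training data yields one column of distances per center.
def _transform_shape_sketch():
    km = KMeans(n_clusters=n_clusters, random_state=42).fit(X)
    X_dist = km.transform(X)
    assert_equal(X_dist.shape, (X.shape[0], n_clusters))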
def test_n_init():
"""Check that increasing the number of init increases the quality"""
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
| bsd-3-clause |